From 89d819e8decb70e421550011aa74da9f014cde97 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bernhard=20Rosenkr=C3=A4nzer?=
Date: Mon, 28 Aug 2017 17:14:23 +0200
Subject: Import initial gcc 7.2 build
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Bernhard Rosenkränzer
---
 Android.mk | 0
 COPYING | 340 +
 COPYING.LIB | 510 +
 COPYING.RUNTIME | 73 +
 COPYING3 | 674 +
 COPYING3.LIB | 165 +
 MODULE_LICENSE_GPL | 0
 NOTICE | 340 +
 arm-linux-androideabi/bin/ar | 1 +
 arm-linux-androideabi/bin/as | 1 +
 arm-linux-androideabi/bin/ld | 1 +
 arm-linux-androideabi/bin/ld.bfd | 1 +
 arm-linux-androideabi/bin/ld.gold | 1 +
 arm-linux-androideabi/bin/nm | 1 +
 arm-linux-androideabi/bin/objcopy | 1 +
 arm-linux-androideabi/bin/objdump | 1 +
 arm-linux-androideabi/bin/ranlib | 1 +
 arm-linux-androideabi/bin/strip | 1 +
 .../lib/ldscripts/armelf_linux_eabi.x | 246 +
 .../lib/ldscripts/armelf_linux_eabi.xbn | 243 +
 .../lib/ldscripts/armelf_linux_eabi.xc | 244 +
 .../lib/ldscripts/armelf_linux_eabi.xd | 245 +
 .../lib/ldscripts/armelf_linux_eabi.xdc | 244 +
 .../lib/ldscripts/armelf_linux_eabi.xdw | 244 +
 .../lib/ldscripts/armelf_linux_eabi.xn | 245 +
 .../lib/ldscripts/armelf_linux_eabi.xr | 166 +
 .../lib/ldscripts/armelf_linux_eabi.xs | 234 +
 .../lib/ldscripts/armelf_linux_eabi.xsc | 234 +
 .../lib/ldscripts/armelf_linux_eabi.xsw | 233 +
 .../lib/ldscripts/armelf_linux_eabi.xu | 167 +
 .../lib/ldscripts/armelf_linux_eabi.xw | 244 +
 .../lib/ldscripts/armelfb_linux_eabi.x | 246 +
 .../lib/ldscripts/armelfb_linux_eabi.xbn | 243 +
 .../lib/ldscripts/armelfb_linux_eabi.xc | 244 +
 .../lib/ldscripts/armelfb_linux_eabi.xd | 245 +
 .../lib/ldscripts/armelfb_linux_eabi.xdc | 244 +
 .../lib/ldscripts/armelfb_linux_eabi.xdw | 244 +
 .../lib/ldscripts/armelfb_linux_eabi.xn | 245 +
 .../lib/ldscripts/armelfb_linux_eabi.xr | 166 +
 .../lib/ldscripts/armelfb_linux_eabi.xs | 234 +
 .../lib/ldscripts/armelfb_linux_eabi.xsc | 234 +
 .../lib/ldscripts/armelfb_linux_eabi.xsw | 233 +
 .../lib/ldscripts/armelfb_linux_eabi.xu | 167 +
 .../lib/ldscripts/armelfb_linux_eabi.xw | 244 +
 arm-linux-androideabi/lib/libatomic.a | Bin 0 -> 492240 bytes
 arm-linux-androideabi/lib/libgomp.a | Bin 0 -> 1127844 bytes
 arm-linux-androideabi/lib/libgomp.spec | 3 +
 arm-linux-androideabi/lib/libobjc.a | Bin 0 -> 384536 bytes
 bin/arm-linux-androideabi-addr2line | Bin 0 -> 733528 bytes
 bin/arm-linux-androideabi-ar | Bin 0 -> 760832 bytes
 bin/arm-linux-androideabi-as | Bin 0 -> 1326624 bytes
 bin/arm-linux-androideabi-c++ | 1 +
 bin/arm-linux-androideabi-c++filt | Bin 0 -> 731512 bytes
 bin/arm-linux-androideabi-cpp | Bin 0 -> 946184 bytes
 bin/arm-linux-androideabi-dwp | Bin 0 -> 2794408 bytes
 bin/arm-linux-androideabi-elfedit | Bin 0 -> 27976 bytes
 bin/arm-linux-androideabi-g++ | 66 +
 bin/arm-linux-androideabi-gcc | 66 +
 bin/arm-linux-androideabi-gcc-7.2 | 1 +
 bin/arm-linux-androideabi-gcc-7.2.0 | Bin 0 -> 945256 bytes
 bin/arm-linux-androideabi-gcc-ar | Bin 0 -> 26536 bytes
 bin/arm-linux-androideabi-gcc-nm | Bin 0 -> 26504 bytes
 bin/arm-linux-androideabi-gcc-ranlib | Bin 0 -> 26504 bytes
 bin/arm-linux-androideabi-gcov | Bin 0 -> 553864 bytes
 bin/arm-linux-androideabi-gcov-dump | Bin 0 -> 451480 bytes
 bin/arm-linux-androideabi-gcov-tool | Bin 0 -> 475384 bytes
 bin/arm-linux-androideabi-gprof | Bin 0 -> 801368 bytes
 bin/arm-linux-androideabi-ld | 1 +
 bin/arm-linux-androideabi-ld.bfd | Bin 0 -> 1221216 bytes
 bin/arm-linux-androideabi-ld.gold | Bin 0 -> 4691976 bytes
 bin/arm-linux-androideabi-nm | Bin 0 -> 744664 bytes
 bin/arm-linux-androideabi-objcopy | Bin 0 -> 919640 bytes
 bin/arm-linux-androideabi-objdump | Bin 0 -> 1160664 bytes
 bin/arm-linux-androideabi-ranlib | Bin 0 -> 760832 bytes
 bin/arm-linux-androideabi-readelf | Bin 0 -> 441944 bytes
 bin/arm-linux-androideabi-size | Bin 0 -> 734424 bytes
 bin/arm-linux-androideabi-strings | Bin 0 -> 733720 bytes
 bin/arm-linux-androideabi-strip | Bin 0 -> 919672 bytes
 bin/real-arm-linux-androideabi-g++ | Bin 0 -> 949352 bytes
 bin/real-arm-linux-androideabi-gcc | Bin 0 -> 945256 bytes
 lib/gcc/arm-linux-androideabi/7.2.0/crtbegin.o | Bin 0 -> 2508 bytes
 lib/gcc/arm-linux-androideabi/7.2.0/crtbeginS.o | Bin 0 -> 2724 bytes
 lib/gcc/arm-linux-androideabi/7.2.0/crtbeginT.o | Bin 0 -> 2508 bytes
 lib/gcc/arm-linux-androideabi/7.2.0/crtend.o | Bin 0 -> 964 bytes
 lib/gcc/arm-linux-androideabi/7.2.0/crtendS.o | Bin 0 -> 964 bytes
 lib/gcc/arm-linux-androideabi/7.2.0/crtfastmath.o | Bin 0 -> 1204 bytes
 .../7.2.0/include-fixed/README | 14 +
 .../7.2.0/include-fixed/limits.h | 197 +
 .../7.2.0/include-fixed/linux/a.out.h | 229 +
 .../7.2.0/include-fixed/syslimits.h | 8 +
 .../arm-linux-androideabi/7.2.0/include/arm_acle.h | 241 +
 .../arm-linux-androideabi/7.2.0/include/arm_cmse.h | 199 +
 .../arm-linux-androideabi/7.2.0/include/arm_fp16.h | 255 +
 .../arm-linux-androideabi/7.2.0/include/arm_neon.h | 18020 +++++++++++++++++++
 .../arm-linux-androideabi/7.2.0/include/float.h | 506 +
 lib/gcc/arm-linux-androideabi/7.2.0/include/gcov.h | 41 +
 .../arm-linux-androideabi/7.2.0/include/iso646.h | 45 +
 .../arm-linux-androideabi/7.2.0/include/mmintrin.h | 1836 ++
 .../7.2.0/include/objc/NXConstStr.h | 51 +
 .../7.2.0/include/objc/Object.h | 62 +
 .../7.2.0/include/objc/Protocol.h | 54 +
 .../7.2.0/include/objc/message.h | 119 +
 .../7.2.0/include/objc/objc-decls.h | 46 +
 .../7.2.0/include/objc/objc-exception.h | 109 +
 .../7.2.0/include/objc/objc-sync.h | 69 +
 .../7.2.0/include/objc/objc.h | 151 +
 .../7.2.0/include/objc/runtime.h | 1143 ++
 .../arm-linux-androideabi/7.2.0/include/objc/thr.h | 116 +
 lib/gcc/arm-linux-androideabi/7.2.0/include/omp.h | 165 +
 .../arm-linux-androideabi/7.2.0/include/openacc.h | 131 +
 .../arm-linux-androideabi/7.2.0/include/stdalign.h | 39 +
 .../arm-linux-androideabi/7.2.0/include/stdarg.h | 127 +
 .../7.2.0/include/stdatomic.h | 243 +
 .../arm-linux-androideabi/7.2.0/include/stdbool.h | 54 +
 .../arm-linux-androideabi/7.2.0/include/stddef.h | 451 +
 .../arm-linux-androideabi/7.2.0/include/stdfix.h | 204 +
 .../7.2.0/include/stdint-gcc.h | 364 +
 .../arm-linux-androideabi/7.2.0/include/stdint.h | 14 +
 .../7.2.0/include/stdnoreturn.h | 35 +
 .../7.2.0/include/unwind-arm-common.h | 250 +
 .../arm-linux-androideabi/7.2.0/include/unwind.h | 93 +
 .../arm-linux-androideabi/7.2.0/include/varargs.h | 7 +
 lib/gcc/arm-linux-androideabi/7.2.0/libgcc.a | Bin 0 -> 23541702 bytes
 lib/gcc/arm-linux-androideabi/7.2.0/libgcov.a | Bin 0 -> 483840 bytes
 lib64/libcc1.so | 1 +
 lib64/libcc1.so.0 | 1 +
 lib64/libcc1.so.0.0.0 | Bin 0 -> 118304 bytes
 libexec/gcc/arm-linux-androideabi/7.2.0/cc1 | Bin 0 -> 20117256 bytes
 libexec/gcc/arm-linux-androideabi/7.2.0/cc1obj | Bin 0 -> 20323400 bytes
 libexec/gcc/arm-linux-androideabi/7.2.0/cc1objplus | Bin 0 -> 21666280 bytes
 libexec/gcc/arm-linux-androideabi/7.2.0/cc1plus | Bin 0 -> 21456072 bytes
 libexec/gcc/arm-linux-androideabi/7.2.0/collect2 | Bin 0 -> 537656 bytes
 .../arm-linux-androideabi/7.2.0/liblto_plugin.so | 1 +
 .../arm-linux-androideabi/7.2.0/liblto_plugin.so.0 | 1 +
 .../7.2.0/liblto_plugin.so.0.0.0 | Bin 0 -> 92112 bytes
 .../gcc/arm-linux-androideabi/7.2.0/lto-wrapper | Bin 0 -> 848792 bytes
 libexec/gcc/arm-linux-androideabi/7.2.0/lto1 | Bin 0 -> 19158184 bytes
 .../arm-linux-androideabi/7.2.0/plugin/gengtype | Bin 0 -> 184640 bytes
 repo.prop | 57 +
 toolchain.mk | 17 +
 140 files changed, 33994 insertions(+)
 create mode 100644 Android.mk
 create mode 100644 COPYING
 create mode 100644 COPYING.LIB
 create mode 100644 COPYING.RUNTIME
 create mode 100644 COPYING3
 create mode 100644 COPYING3.LIB
 create mode 100644 MODULE_LICENSE_GPL
 create mode 100644 NOTICE
 create mode 120000 arm-linux-androideabi/bin/ar
 create mode 120000 arm-linux-androideabi/bin/as
 create mode 120000 arm-linux-androideabi/bin/ld
 create mode 120000 arm-linux-androideabi/bin/ld.bfd
 create mode 120000 arm-linux-androideabi/bin/ld.gold
 create mode 120000 arm-linux-androideabi/bin/nm
 create mode 120000 arm-linux-androideabi/bin/objcopy
 create mode 120000 arm-linux-androideabi/bin/objdump
 create mode 120000 arm-linux-androideabi/bin/ranlib
 create mode 120000 arm-linux-androideabi/bin/strip
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu
 create mode 100644 arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw
 create mode 100644 arm-linux-androideabi/lib/libatomic.a
 create mode 100644 arm-linux-androideabi/lib/libgomp.a
 create mode 100644 arm-linux-androideabi/lib/libgomp.spec
 create mode 100644 arm-linux-androideabi/lib/libobjc.a
 create mode 100755 bin/arm-linux-androideabi-addr2line
 create mode 100755 bin/arm-linux-androideabi-ar
 create mode 100755 bin/arm-linux-androideabi-as
 create mode 120000 bin/arm-linux-androideabi-c++
 create mode 100755 bin/arm-linux-androideabi-c++filt
 create mode 100755 bin/arm-linux-androideabi-cpp
 create mode 100755 bin/arm-linux-androideabi-dwp
 create mode 100755 bin/arm-linux-androideabi-elfedit
 create mode 100755 bin/arm-linux-androideabi-g++
 create mode 100755 bin/arm-linux-androideabi-gcc
 create mode 120000 bin/arm-linux-androideabi-gcc-7.2
 create mode 100755 bin/arm-linux-androideabi-gcc-7.2.0
 create mode 100755 bin/arm-linux-androideabi-gcc-ar
 create mode 100755 bin/arm-linux-androideabi-gcc-nm
 create mode 100755 bin/arm-linux-androideabi-gcc-ranlib
 create mode 100755 bin/arm-linux-androideabi-gcov
 create mode 100755 bin/arm-linux-androideabi-gcov-dump
 create mode 100755 bin/arm-linux-androideabi-gcov-tool
 create mode 100755 bin/arm-linux-androideabi-gprof
 create mode 120000 bin/arm-linux-androideabi-ld
 create mode 100755 bin/arm-linux-androideabi-ld.bfd
 create mode 100755 bin/arm-linux-androideabi-ld.gold
 create mode 100755 bin/arm-linux-androideabi-nm
 create mode 100755 bin/arm-linux-androideabi-objcopy
 create mode 100755 bin/arm-linux-androideabi-objdump
 create mode 100755 bin/arm-linux-androideabi-ranlib
 create mode 100755 bin/arm-linux-androideabi-readelf
 create mode 100755 bin/arm-linux-androideabi-size
 create mode 100755 bin/arm-linux-androideabi-strings
 create mode 100755 bin/arm-linux-androideabi-strip
 create mode 100755 bin/real-arm-linux-androideabi-g++
 create mode 100755 bin/real-arm-linux-androideabi-gcc
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/crtbegin.o
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/crtbeginS.o
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/crtbeginT.o
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/crtend.o
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/crtendS.o
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/crtfastmath.o
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/README
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/limits.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/linux/a.out.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/syslimits.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/arm_acle.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/arm_cmse.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/arm_fp16.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/arm_neon.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/float.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/gcov.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/iso646.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/mmintrin.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/NXConstStr.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Object.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Protocol.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/message.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-decls.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-exception.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-sync.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/runtime.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/objc/thr.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/omp.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/openacc.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdalign.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdarg.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdatomic.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdbool.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stddef.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdfix.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdint-gcc.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdint.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/stdnoreturn.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/unwind-arm-common.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/unwind.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/include/varargs.h
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/libgcc.a
 create mode 100644 lib/gcc/arm-linux-androideabi/7.2.0/libgcov.a
 create mode 120000 lib64/libcc1.so
 create mode 120000 lib64/libcc1.so.0
 create mode 100755 lib64/libcc1.so.0.0.0
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/cc1
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/cc1obj
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/cc1objplus
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/cc1plus
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/collect2
 create mode 120000 libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so
 create mode 120000 libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0.0.0
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/lto-wrapper
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/lto1
 create mode 100755 libexec/gcc/arm-linux-androideabi/7.2.0/plugin/gengtype
 create mode 100644 repo.prop
 create mode 100644 toolchain.mk

diff --git a/Android.mk b/Android.mk
new file mode 100644
index 0000000..e69de29
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..623b625
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,340 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/COPYING.LIB b/COPYING.LIB
new file mode 100644
index 0000000..2d2d780
--- /dev/null
+++ b/COPYING.LIB
@@ -0,0 +1,510 @@
+
+                  GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations
+below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it
+becomes a de-facto standard.  To achieve this, non-free programs must
+be allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+                  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control
+compilation and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at least + three years, to give the same user the materials specified in + Subsection 6a, above, for a charge no more than the cost of + performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. 
However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. 
For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply, and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License +may add an explicit geographical distribution limitation excluding those +countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. 
SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms +of the ordinary General Public License). + + To apply these terms, attach the following notices to the library. +It is safest to attach them to the start of each source file to most +effectively convey the exclusion of warranty; and each file should +have at least the "copyright" line and a pointer to where the full +notice is found. + + + <one line to give the library's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or +your school, if any, to sign a "copyright disclaimer" for the library, +if necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James + Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/COPYING.RUNTIME b/COPYING.RUNTIME new file mode 100644 index 0000000..e1b3c69 --- /dev/null +++ b/COPYING.RUNTIME @@ -0,0 +1,73 @@ +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program.
The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + diff --git a/COPYING3 b/COPYING3 new file mode 100644 index 0000000..94a9ed0 --- /dev/null +++ b/COPYING3 @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/COPYING3.LIB b/COPYING3.LIB new file mode 100644 index 0000000..fc8a5de --- /dev/null +++ b/COPYING3.LIB @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/MODULE_LICENSE_GPL b/MODULE_LICENSE_GPL new file mode 100644 index 0000000..e69de29 diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..623b625 --- /dev/null +++ b/NOTICE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. 
We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. 
+However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. 
Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details.
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/arm-linux-androideabi/bin/ar b/arm-linux-androideabi/bin/ar new file mode 120000 index 0000000..8c48646 --- /dev/null +++ b/arm-linux-androideabi/bin/ar @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-ar \ No newline at end of file diff --git a/arm-linux-androideabi/bin/as b/arm-linux-androideabi/bin/as new file mode 120000 index 0000000..5378e97 --- /dev/null +++ b/arm-linux-androideabi/bin/as @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-as \ No newline at end of file diff --git a/arm-linux-androideabi/bin/ld b/arm-linux-androideabi/bin/ld new file mode 120000 index 0000000..dddda83 --- /dev/null +++ b/arm-linux-androideabi/bin/ld @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-ld \ No newline at end of file diff --git a/arm-linux-androideabi/bin/ld.bfd b/arm-linux-androideabi/bin/ld.bfd new file mode 120000 index 0000000..57728a0 --- /dev/null +++ b/arm-linux-androideabi/bin/ld.bfd @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-ld.bfd \ No newline at end of file diff --git a/arm-linux-androideabi/bin/ld.gold b/arm-linux-androideabi/bin/ld.gold new file mode 120000 index 0000000..266c472 --- /dev/null +++ b/arm-linux-androideabi/bin/ld.gold @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-ld.gold \ No newline at end of file diff --git a/arm-linux-androideabi/bin/nm b/arm-linux-androideabi/bin/nm new file mode 120000 index 0000000..03e24b6 --- /dev/null +++ b/arm-linux-androideabi/bin/nm @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-nm \ No newline at end of file diff --git a/arm-linux-androideabi/bin/objcopy b/arm-linux-androideabi/bin/objcopy new file mode 120000 index 0000000..2ec109f --- /dev/null +++ b/arm-linux-androideabi/bin/objcopy @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-objcopy \ No newline at end of file diff --git a/arm-linux-androideabi/bin/objdump b/arm-linux-androideabi/bin/objdump new file mode 120000 index
0000000..2303680 --- /dev/null +++ b/arm-linux-androideabi/bin/objdump @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-objdump \ No newline at end of file diff --git a/arm-linux-androideabi/bin/ranlib b/arm-linux-androideabi/bin/ranlib new file mode 120000 index 0000000..7cf5d4f --- /dev/null +++ b/arm-linux-androideabi/bin/ranlib @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-ranlib \ No newline at end of file diff --git a/arm-linux-androideabi/bin/strip b/arm-linux-androideabi/bin/strip new file mode 120000 index 0000000..3daf7a6 --- /dev/null +++ b/arm-linux-androideabi/bin/strip @@ -0,0 +1 @@ +../../bin/arm-linux-androideabi-strip \ No newline at end of file diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x new file mode 100644 index 0000000..04d195b --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x @@ -0,0 +1,246 @@ +/* Default linker script, for normal executables */ +/* Modified for Android. */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . = 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + 
{ + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. 
+ The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn new file mode 100644 index 0000000..4a05e8a --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn @@ -0,0 +1,243 @@ +/* Script for -N: mix text and data on same page; don't align data */ +/* Copyright (C) 2014 Free Software Foundation, Inc. 
+ Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . = 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. 
*/ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = .; + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . 
; + PROVIDE (end = .); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc new file mode 100644 index 0000000..2aa9174 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc @@ -0,0 +1,244 @@ +/* Script for -z combreloc: combine and sort reloc sections */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd new file mode 100644 index 0000000..a49e267 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd @@ -0,0 +1,245 @@ +/* Script for ld -pie: link position independent executable */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0); . 
= 0 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . 
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc new file mode 100644 index 0000000..f90f0b3 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc @@ -0,0 +1,244 @@ +/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0); . 
= 0 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw new file mode 100644 index 0000000..056f45a --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw @@ -0,0 +1,244 @@ +/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0); . 
= 0 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + . = DATA_SEGMENT_RELRO_END (0, .); + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn new file mode 100644 index 0000000..5e64b79 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn @@ -0,0 +1,245 @@ +/* Script for -n: mix text and data on same page */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . 
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr new file mode 100644 index 0000000..d48d4ec --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr @@ -0,0 +1,166 @@ +/* Script for ld -r: link without relocation */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) + /* For some reason, the Solaris linker makes bad executables + if gld -r is used and the intermediate file has sections starting + at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld + bug. But for now assigning the zero vmas works. 
*/ +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + .interp 0 : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash 0 : { *(.hash) } + .gnu.hash 0 : { *(.gnu.hash) } + .dynsym 0 : { *(.dynsym) } + .dynstr 0 : { *(.dynstr) } + .gnu.version 0 : { *(.gnu.version) } + .gnu.version_d 0: { *(.gnu.version_d) } + .gnu.version_r 0: { *(.gnu.version_r) } + .rel.init 0 : { *(.rel.init) } + .rela.init 0 : { *(.rela.init) } + .rel.text 0 : { *(.rel.text) } + .rela.text 0 : { *(.rela.text) } + .rel.fini 0 : { *(.rel.fini) } + .rela.fini 0 : { *(.rela.fini) } + .rel.rodata 0 : { *(.rel.rodata) } + .rela.rodata 0 : { *(.rela.rodata) } + .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) } + .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) } + .rel.data 0 : { *(.rel.data) } + .rela.data 0 : { *(.rela.data) } + .rel.tdata 0 : { *(.rel.tdata) } + .rela.tdata 0 : { *(.rela.tdata) } + .rel.tbss 0 : { *(.rel.tbss) } + .rela.tbss 0 : { *(.rela.tbss) } + .rel.ctors 0 : { *(.rel.ctors) } + .rela.ctors 0 : { *(.rela.ctors) } + .rel.dtors 0 : { *(.rel.dtors) } + .rela.dtors 0 : { *(.rela.dtors) } + .rel.got 0 : { *(.rel.got) } + .rela.got 0 : { *(.rela.got) } + .rel.bss 0 : { *(.rel.bss) } + .rela.bss 0 : { *(.rela.bss) } + .rel.iplt 0 : + { + *(.rel.iplt) + } + .rela.iplt 0 : + { + *(.rela.iplt) + } + .rel.plt 0 : + { + *(.rel.plt) + } + .rela.plt 0 : + { + *(.rela.plt) + } + .init 0 : + { + KEEP (*(SORT_NONE(.init))) + } + .plt 0 : { *(.plt) } + .iplt 0 : { *(.iplt) } + .text 0 : + { + *(.text .stub) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + } + .fini 0 : + { + KEEP (*(SORT_NONE(.fini))) + } + .rodata 0 : { *(.rodata) } + .rodata1 0 : { *(.rodata1) } + .ARM.extab 0 : { *(.ARM.extab) } + .ARM.exidx 0 : { *(.ARM.exidx) } + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + /* Exception handling */ + .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata 0 : { *(.tdata) } + .tbss 0 : { *(.tbss) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + .preinit_array 0 : + { + KEEP (*(.preinit_array)) + } + .jcr 0 : { KEEP (*(.jcr)) } + .dynamic 0 : { *(.dynamic) } + .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data 0 : + { + *(.data) + } + .data1 0 : { *(.data1) } + .bss 0 : + { + *(.dynbss) + *(.bss) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + } + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs new file mode 100644 index 0000000..ef80780 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs @@ -0,0 +1,234 @@ +/* Script for ld --shared: link shared library */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . 
= 0 + SIZEOF_HEADERS; + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + *(.rel.iplt) + } + .rela.iplt : + { + *(.rela.iplt) + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . 
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. 
+ Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc new file mode 100644 index 0000000..fcc8641 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc @@ -0,0 +1,234 @@ +/* Script for --shared -z combreloc: shared library, combine & sort relocs */ +/* Modified for Android. */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . 
= 0 + SIZEOF_HEADERS; + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + *(.rel.iplt) + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + *(.rela.iplt) + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw new file mode 100644 index 0000000..c89c021 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw @@ -0,0 +1,233 @@ +/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . 
= 0 + SIZEOF_HEADERS; + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + *(.rel.iplt) + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + *(.rela.iplt) + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + . = DATA_SEGMENT_RELRO_END (0, .); + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu new file mode 100644 index 0000000..52d639e --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu @@ -0,0 +1,167 @@ +/* Script for ld -Ur: link w/out relocation, do create constructors */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) + /* For some reason, the Solaris linker makes bad executables + if gld -r is used and the intermediate file has sections starting + at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld + bug. But for now assigning the zero vmas works. 
*/ +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + .interp 0 : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash 0 : { *(.hash) } + .gnu.hash 0 : { *(.gnu.hash) } + .dynsym 0 : { *(.dynsym) } + .dynstr 0 : { *(.dynstr) } + .gnu.version 0 : { *(.gnu.version) } + .gnu.version_d 0: { *(.gnu.version_d) } + .gnu.version_r 0: { *(.gnu.version_r) } + .rel.init 0 : { *(.rel.init) } + .rela.init 0 : { *(.rela.init) } + .rel.text 0 : { *(.rel.text) } + .rela.text 0 : { *(.rela.text) } + .rel.fini 0 : { *(.rel.fini) } + .rela.fini 0 : { *(.rela.fini) } + .rel.rodata 0 : { *(.rel.rodata) } + .rela.rodata 0 : { *(.rela.rodata) } + .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) } + .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) } + .rel.data 0 : { *(.rel.data) } + .rela.data 0 : { *(.rela.data) } + .rel.tdata 0 : { *(.rel.tdata) } + .rela.tdata 0 : { *(.rela.tdata) } + .rel.tbss 0 : { *(.rel.tbss) } + .rela.tbss 0 : { *(.rela.tbss) } + .rel.ctors 0 : { *(.rel.ctors) } + .rela.ctors 0 : { *(.rela.ctors) } + .rel.dtors 0 : { *(.rel.dtors) } + .rela.dtors 0 : { *(.rela.dtors) } + .rel.got 0 : { *(.rel.got) } + .rela.got 0 : { *(.rela.got) } + .rel.bss 0 : { *(.rel.bss) } + .rela.bss 0 : { *(.rela.bss) } + .rel.iplt 0 : + { + *(.rel.iplt) + } + .rela.iplt 0 : + { + *(.rela.iplt) + } + .rel.plt 0 : + { + *(.rel.plt) + } + .rela.plt 0 : + { + *(.rela.plt) + } + .init 0 : + { + KEEP (*(SORT_NONE(.init))) + } + .plt 0 : { *(.plt) } + .iplt 0 : { *(.iplt) } + .text 0 : + { + *(.text .stub) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + } + .fini 0 : + { + KEEP (*(SORT_NONE(.fini))) + } + .rodata 0 : { *(.rodata) } + .rodata1 0 : { *(.rodata1) } + .ARM.extab 0 : { *(.ARM.extab) } + .ARM.exidx 0 : { *(.ARM.exidx) } + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + /* Exception handling */ + .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata 0 : { *(.tdata) } + .tbss 0 : { *(.tbss) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + .preinit_array 0 : + { + KEEP (*(.preinit_array)) + } + .jcr 0 : { KEEP (*(.jcr)) } + .dynamic 0 : { *(.dynamic) } + .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data 0 : + { + *(.data) + SORT(CONSTRUCTORS) + } + .data1 0 : { *(.data1) } + .bss 0 : + { + *(.dynbss) + *(.bss) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + } + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw new file mode 100644 index 0000000..f7418bd --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw @@ -0,0 +1,244 @@ +/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + . = DATA_SEGMENT_RELRO_END (0, .); + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x new file mode 100644 index 0000000..80f3ea8 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x @@ -0,0 +1,246 @@ +/* Default linker script, for normal executables */ +/* Modified for Android. */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
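/* Note on the armelfb_* variants that begin here: OUTPUT_FORMAT takes
   (default, big-endian, little-endian) BFD names, so these scripts default
   to elf32-bigarm and ld's -EB/-EL switches pick between the two byte
   orders. A compile-time check on the C side, using GCC's predefined
   byte-order macros (a sketch, not tied to this toolchain's defaults):

     #include <stdio.h>

     int main(void) {
     #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
       puts("big-endian ARM (elf32-bigarm)");
     #else
       puts("little-endian ARM (elf32-littlearm)");
     #endif
       return 0;
     }
*/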
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . 
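/* CONSTANT (MAXPAGESIZE) and CONSTANT (COMMONPAGESIZE) in the statement
   being continued here read the target's page-size parameters;
   DATA_SEGMENT_ALIGN then places the data segment either on fresh pages
   or in the tail of the text segment's last page, whichever appears to
   waste less. Both values can be overridden on the link line if a
   platform needs it (illustrative values):

     -Wl,-z,max-page-size=0x10000 -Wl,-z,common-page-size=0x1000
*/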
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn new file mode 100644 index 0000000..fdda7f3 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn @@ -0,0 +1,243 @@ +/* Script for -N: mix text and data on same page; don't align data */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
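/* The .xbn script starting here backs ld's -N option: text and data share
   one writable segment and, unlike the .x default above, the data segment
   is not realigned -- where the other scripts place
   ". = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (...)",
   this one degenerates to the no-op ". = .;" (visible below). Typical use
   is via the driver (illustrative file names):

     arm-linux-androideabi-gcc -Wl,-N -o image main.o
*/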
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . 
= .; + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc new file mode 100644 index 0000000..1bf56a2 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc @@ -0,0 +1,244 @@ +/* Script for -z combreloc: combine and sort reloc sections */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
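/* The .xc script starting here backs -z combreloc: where the default .x
   layout gives every relocation flavor its own output section
   (".rel.text : { ... }", ".rel.data : { ... }", and so on), this one
   funnels them all into a single section, e.g.:

     .rel.dyn :
       {
         *(.rel.init)
         *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
         ...
       }

   Combining lets ld sort the entries with relative relocations first,
   which is what enables the DT_RELCOUNT fast path in the dynamic linker. */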
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd new file mode 100644 index 0000000..2600399 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd @@ -0,0 +1,245 @@ +/* Script for ld -pie: link position independent executable */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0); . 
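/* The .xd script starting here is for ld -pie: note __executable_start is
   PROVIDEd as 0 rather than 0x00010000, since a position-independent
   executable is linked at base 0 and relocated by the kernel or dynamic
   linker at load time, so at run time the symbol's address equals the
   load bias. A small sketch that prints the randomized base under ASLR:

     #include <stdio.h>

     extern char __executable_start[];   // PROVIDEd by this script
     extern char etext;                  // end of the text segment

     int main(void) {
       printf("loaded at %p, text ends at %p\n",
              (void *)__executable_start, (void *)&etext);
       return 0;
     }
*/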
= 0 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . 
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc new file mode 100644 index 0000000..c1b5373 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc @@ -0,0 +1,244 @@ +/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0); . 
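/* Each of these scripts also brackets the ARM exception-index table with
   PROVIDE_HIDDEN (__exidx_start/__exidx_end); the EHABI unwinder searches
   that range for the entry covering a faulting PC. Each .ARM.exidx entry
   is two 32-bit words, so the entry count follows directly (a sketch,
   assuming the usual EHABI layout):

     #include <stdint.h>
     #include <stdio.h>

     extern const uint32_t __exidx_start[];
     extern const uint32_t __exidx_end[];

     int main(void) {
       unsigned long entries = (unsigned long)(__exidx_end - __exidx_start) / 2;
       printf("%lu unwind index entries\n", entries);
       return 0;
     }
*/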
= 0 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw new file mode 100644 index 0000000..bf01d44 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw @@ -0,0 +1,244 @@ +/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0); . 
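/* The .xdw script starting here adds -z now -z relro on top of .xdc:
   compare the RELRO boundary below -- here .got is emitted before
   ". = DATA_SEGMENT_RELRO_END (0, .);", so with lazy binding disabled the
   whole GOT (including .got.plt) lands inside the read-only-after-
   relocation region, whereas in .xdc above the boundary comes first and
   the GOT stays writable. The usual way to request this layout
   (illustrative invocation):

     arm-linux-androideabi-gcc -pie -fPIE -Wl,-z,relro,-z,now -o app main.c
*/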
= 0 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + . = DATA_SEGMENT_RELRO_END (0, .); + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn new file mode 100644 index 0000000..cc38f15 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn @@ -0,0 +1,245 @@ +/* Script for -n: mix text and data on same page */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.iplt : + { + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . 
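/* The classic etext/edata/end trio PROVIDEd above (and in each script in
   this set) marks the end of text, of initialized data, and of bss;
   because they are PROVIDEd, they materialize only when a program refers
   to them. A minimal sketch:

     #include <stdio.h>

     extern char etext, edata, end;   // PROVIDEd by the linker script

     int main(void) {
       printf("etext=%p edata=%p end=%p\n",
              (void *)&etext, (void *)&edata, (void *)&end);
       return 0;
     }
*/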
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. 
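+ (.stab/.stabstr carry the legacy pre-DWARF debug format; like the
+ DWARF sections below they are placed at address 0 and never loaded.)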
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr new file mode 100644 index 0000000..c3b0497 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr @@ -0,0 +1,166 @@ +/* Script for ld -r: link without relocation */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) + /* For some reason, the Solaris linker makes bad executables + if gld -r is used and the intermediate file has sections starting + at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld + bug. But for now assigning the zero vmas works. 
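+ With -r the output keeps its relocations so it can be used as input
+ to a later link step; all sections are therefore placed at address 0
+ and no end-of-segment symbols such as _etext are defined.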
*/ +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + .interp 0 : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash 0 : { *(.hash) } + .gnu.hash 0 : { *(.gnu.hash) } + .dynsym 0 : { *(.dynsym) } + .dynstr 0 : { *(.dynstr) } + .gnu.version 0 : { *(.gnu.version) } + .gnu.version_d 0: { *(.gnu.version_d) } + .gnu.version_r 0: { *(.gnu.version_r) } + .rel.init 0 : { *(.rel.init) } + .rela.init 0 : { *(.rela.init) } + .rel.text 0 : { *(.rel.text) } + .rela.text 0 : { *(.rela.text) } + .rel.fini 0 : { *(.rel.fini) } + .rela.fini 0 : { *(.rela.fini) } + .rel.rodata 0 : { *(.rel.rodata) } + .rela.rodata 0 : { *(.rela.rodata) } + .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) } + .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) } + .rel.data 0 : { *(.rel.data) } + .rela.data 0 : { *(.rela.data) } + .rel.tdata 0 : { *(.rel.tdata) } + .rela.tdata 0 : { *(.rela.tdata) } + .rel.tbss 0 : { *(.rel.tbss) } + .rela.tbss 0 : { *(.rela.tbss) } + .rel.ctors 0 : { *(.rel.ctors) } + .rela.ctors 0 : { *(.rela.ctors) } + .rel.dtors 0 : { *(.rel.dtors) } + .rela.dtors 0 : { *(.rela.dtors) } + .rel.got 0 : { *(.rel.got) } + .rela.got 0 : { *(.rela.got) } + .rel.bss 0 : { *(.rel.bss) } + .rela.bss 0 : { *(.rela.bss) } + .rel.iplt 0 : + { + *(.rel.iplt) + } + .rela.iplt 0 : + { + *(.rela.iplt) + } + .rel.plt 0 : + { + *(.rel.plt) + } + .rela.plt 0 : + { + *(.rela.plt) + } + .init 0 : + { + KEEP (*(SORT_NONE(.init))) + } + .plt 0 : { *(.plt) } + .iplt 0 : { *(.iplt) } + .text 0 : + { + *(.text .stub) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + } + .fini 0 : + { + KEEP (*(SORT_NONE(.fini))) + } + .rodata 0 : { *(.rodata) } + .rodata1 0 : { *(.rodata1) } + .ARM.extab 0 : { *(.ARM.extab) } + .ARM.exidx 0 : { *(.ARM.exidx) } + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + /* Exception handling */ + .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata 0 : { *(.tdata) } + .tbss 0 : { *(.tbss) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + .preinit_array 0 : + { + KEEP (*(.preinit_array)) + } + .jcr 0 : { KEEP (*(.jcr)) } + .dynamic 0 : { *(.dynamic) } + .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data 0 : + { + *(.data) + } + .data1 0 : { *(.data1) } + .bss 0 : + { + *(.dynbss) + *(.bss) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + } + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs new file mode 100644 index 0000000..ec4930d --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs @@ -0,0 +1,234 @@ +/* Script for ld --shared: link shared library */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . 
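+ /* A shared library is linked at base 0; the dynamic loader chooses
+    the real load address, so only SIZEOF_HEADERS is added here. */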
= 0 + SIZEOF_HEADERS; + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } + .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } + .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } + .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) } + .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) } + .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } + .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } + .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } + .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } + .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } + .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } + .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } + .rel.iplt : + { + *(.rel.iplt) + } + .rela.iplt : + { + *(.rela.iplt) + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . 
= DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . = ALIGN(32 / 8); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. 
+ Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc new file mode 100644 index 0000000..f2a0b09 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc @@ -0,0 +1,234 @@ +/* Script for --shared -z combreloc: shared library, combine & sort relocs */ +/* Modified for Android. */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . 
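+ /* -z combreloc: the individual .rel.* input sections are combined
+    into the single .rel.dyn output section below and sorted, which
+    speeds up relocation processing in the dynamic loader. */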
= 0 + SIZEOF_HEADERS; + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + *(.rel.iplt) + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + *(.rela.iplt) + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + . = DATA_SEGMENT_RELRO_END (0, .); + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw new file mode 100644 index 0000000..aaab571 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw @@ -0,0 +1,233 @@ +/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . 
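+ /* -z now disables lazy binding, so .got.plt is kept together with
+    .got ahead of DATA_SEGMENT_RELRO_END below, letting the loader
+    remap the whole GOT read-only after startup (-z relro). */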
= 0 + SIZEOF_HEADERS; + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + *(.rel.iplt) + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + *(.rela.iplt) + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + . = DATA_SEGMENT_RELRO_END (0, .); + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu new file mode 100644 index 0000000..7de1661 --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu @@ -0,0 +1,167 @@ +/* Script for ld -Ur: link w/out relocation, do create constructors */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) + /* For some reason, the Solaris linker makes bad executables + if gld -r is used and the intermediate file has sections starting + at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld + bug. But for now assigning the zero vmas works. 
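+ Unlike plain -r, -Ur also resolves references to the constructor
+ tables (note SORT(CONSTRUCTORS) in .data below); the output may be
+ linked again, but not through another -Ur step.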
*/ +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + .interp 0 : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash 0 : { *(.hash) } + .gnu.hash 0 : { *(.gnu.hash) } + .dynsym 0 : { *(.dynsym) } + .dynstr 0 : { *(.dynstr) } + .gnu.version 0 : { *(.gnu.version) } + .gnu.version_d 0: { *(.gnu.version_d) } + .gnu.version_r 0: { *(.gnu.version_r) } + .rel.init 0 : { *(.rel.init) } + .rela.init 0 : { *(.rela.init) } + .rel.text 0 : { *(.rel.text) } + .rela.text 0 : { *(.rela.text) } + .rel.fini 0 : { *(.rel.fini) } + .rela.fini 0 : { *(.rela.fini) } + .rel.rodata 0 : { *(.rel.rodata) } + .rela.rodata 0 : { *(.rela.rodata) } + .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) } + .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) } + .rel.data 0 : { *(.rel.data) } + .rela.data 0 : { *(.rela.data) } + .rel.tdata 0 : { *(.rel.tdata) } + .rela.tdata 0 : { *(.rela.tdata) } + .rel.tbss 0 : { *(.rel.tbss) } + .rela.tbss 0 : { *(.rela.tbss) } + .rel.ctors 0 : { *(.rel.ctors) } + .rela.ctors 0 : { *(.rela.ctors) } + .rel.dtors 0 : { *(.rel.dtors) } + .rela.dtors 0 : { *(.rela.dtors) } + .rel.got 0 : { *(.rel.got) } + .rela.got 0 : { *(.rela.got) } + .rel.bss 0 : { *(.rel.bss) } + .rela.bss 0 : { *(.rela.bss) } + .rel.iplt 0 : + { + *(.rel.iplt) + } + .rela.iplt 0 : + { + *(.rela.iplt) + } + .rel.plt 0 : + { + *(.rel.plt) + } + .rela.plt 0 : + { + *(.rela.plt) + } + .init 0 : + { + KEEP (*(SORT_NONE(.init))) + } + .plt 0 : { *(.plt) } + .iplt 0 : { *(.iplt) } + .text 0 : + { + *(.text .stub) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + } + .fini 0 : + { + KEEP (*(SORT_NONE(.fini))) + } + .rodata 0 : { *(.rodata) } + .rodata1 0 : { *(.rodata1) } + .ARM.extab 0 : { *(.ARM.extab) } + .ARM.exidx 0 : { *(.ARM.exidx) } + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + /* Exception handling */ + .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata 0 : { *(.tdata) } + .tbss 0 : { *(.tbss) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + .preinit_array 0 : + { + KEEP (*(.preinit_array)) + } + .jcr 0 : { KEEP (*(.jcr)) } + .dynamic 0 : { *(.dynamic) } + .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + .data 0 : + { + *(.data) + SORT(CONSTRUCTORS) + } + .data1 0 : { *(.data1) } + .bss 0 : + { + *(.dynbss) + *(.bss) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + } + /* Stabs debugging sections. 
*/ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } +} diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw new file mode 100644 index 0000000..f9550bf --- /dev/null +++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw @@ -0,0 +1,244 @@ +/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */ +/* Copyright (C) 2014 Free Software Foundation, Inc. + Copying and distribution of this script, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. */ +OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +ENTRY(_start) +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = 0x00010000); . 
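+ /* Same layout as the default executable script, but relocation
+    sections are combined and sorted (-z combreloc) and the GOT ends
+    up inside the RELRO region (-z now -z relro). */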
= 0x00010000 + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : { *(.note.gnu.build-id) } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.dyn : + { + *(.rel.init) + *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) + *(.rel.fini) + *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) + *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) + *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) + *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) + *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) + *(.rel.ctors) + *(.rel.dtors) + *(.rel.got) + *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rel_iplt_start = .); + *(.rel.iplt) + PROVIDE_HIDDEN (__rel_iplt_end = .); + } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .rel.plt : + { + *(.rel.plt) + } + .rela.plt : + { + *(.rela.plt) + } + .init : + { + KEEP (*(SORT_NONE(.init))) + } + .plt : { *(.plt) } + .iplt : { *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + } + .fini : + { + KEEP (*(SORT_NONE(.fini))) + } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata1 : { *(.rodata1) } + .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) } + PROVIDE_HIDDEN (__exidx_start = .); + .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) } + PROVIDE_HIDDEN (__exidx_end = .); + .eh_frame_hdr : { *(.eh_frame_hdr) } + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. For 32 bits we want to align + at exactly a page boundary to make life easier for apriori. */ + . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + /* Ensure the __preinit_array_start label is properly aligned. We + could instead move the label definition inside the section, but + the linker would then create the section even if it turns out to + be empty, which isn't pretty. */ + . 
= ALIGN(32 / 8); + PROVIDE_HIDDEN (__preinit_array_start = .); + .preinit_array : + { + KEEP (*(.preinit_array)) + } + PROVIDE_HIDDEN (__preinit_array_end = .); + PROVIDE_HIDDEN (__init_array_start = .); + .init_array : + { + KEEP (*crtbegin*.o(.init_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*))) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors)) + } + PROVIDE_HIDDEN (__init_array_end = .); + PROVIDE_HIDDEN (__fini_array_start = .); + .fini_array : + { + KEEP (*crtbegin*.o(.fini_array)) + KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*))) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors)) + } + PROVIDE_HIDDEN (__fini_array_end = .); + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin*.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. + The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin*.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) } + . = DATA_SEGMENT_RELRO_END (0, .); + .data : + { + PROVIDE (__data_start = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + __bss_start = .; + __bss_start__ = .; + .bss : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. */ + . = ALIGN(32 / 8); + } + _bss_end__ = . ; __bss_end__ = . ; + . = ALIGN(32 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(32 / 8); + __end__ = . ; + _end = .; + _bss_end__ = . ; __bss_end__ = . ; __end__ = . ; + PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + /* DWARF Extension. */ + .debug_macro 0 : { *(.debug_macro) } + .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) } +} diff --git a/arm-linux-androideabi/lib/libatomic.a b/arm-linux-androideabi/lib/libatomic.a new file mode 100644 index 0000000..093fbea Binary files /dev/null and b/arm-linux-androideabi/lib/libatomic.a differ diff --git a/arm-linux-androideabi/lib/libgomp.a b/arm-linux-androideabi/lib/libgomp.a new file mode 100644 index 0000000..14d1da4 Binary files /dev/null and b/arm-linux-androideabi/lib/libgomp.a differ diff --git a/arm-linux-androideabi/lib/libgomp.spec b/arm-linux-androideabi/lib/libgomp.spec new file mode 100644 index 0000000..c7a666f --- /dev/null +++ b/arm-linux-androideabi/lib/libgomp.spec @@ -0,0 +1,3 @@ +# This spec file is read by gcc when linking. It is used to specify the +# standard libraries we need in order to link with libgomp. 
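+# The gcc link spec reads this file via %:include(libgomp.spec) when
+# -fopenmp (or -ftree-parallelize-loops) is given and substitutes
+# %(link_gomp), so the libraries below are appended to the link line.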
+*link_gomp: -lgomp -ldl
diff --git a/arm-linux-androideabi/lib/libobjc.a b/arm-linux-androideabi/lib/libobjc.a
new file mode 100644
index 0000000..a602727
Binary files /dev/null and b/arm-linux-androideabi/lib/libobjc.a differ
diff --git a/bin/arm-linux-androideabi-addr2line b/bin/arm-linux-androideabi-addr2line
new file mode 100755
index 0000000..0752740
Binary files /dev/null and b/bin/arm-linux-androideabi-addr2line differ
diff --git a/bin/arm-linux-androideabi-ar b/bin/arm-linux-androideabi-ar
new file mode 100755
index 0000000..c9fc7b9
Binary files /dev/null and b/bin/arm-linux-androideabi-ar differ
diff --git a/bin/arm-linux-androideabi-as b/bin/arm-linux-androideabi-as
new file mode 100755
index 0000000..178388d
Binary files /dev/null and b/bin/arm-linux-androideabi-as differ
diff --git a/bin/arm-linux-androideabi-c++ b/bin/arm-linux-androideabi-c++
new file mode 120000
index 0000000..818bae6
--- /dev/null
+++ b/bin/arm-linux-androideabi-c++
@@ -0,0 +1 @@
+arm-linux-androideabi-g++
\ No newline at end of file
diff --git a/bin/arm-linux-androideabi-c++filt b/bin/arm-linux-androideabi-c++filt
new file mode 100755
index 0000000..f838b3e
Binary files /dev/null and b/bin/arm-linux-androideabi-c++filt differ
diff --git a/bin/arm-linux-androideabi-cpp b/bin/arm-linux-androideabi-cpp
new file mode 100755
index 0000000..f1aeeec
Binary files /dev/null and b/bin/arm-linux-androideabi-cpp differ
diff --git a/bin/arm-linux-androideabi-dwp b/bin/arm-linux-androideabi-dwp
new file mode 100755
index 0000000..6a290cd
Binary files /dev/null and b/bin/arm-linux-androideabi-dwp differ
diff --git a/bin/arm-linux-androideabi-elfedit b/bin/arm-linux-androideabi-elfedit
new file mode 100755
index 0000000..9a367c0
Binary files /dev/null and b/bin/arm-linux-androideabi-elfedit differ
diff --git a/bin/arm-linux-androideabi-g++ b/bin/arm-linux-androideabi-g++
new file mode 100755
index 0000000..54944a2
--- /dev/null
+++ b/bin/arm-linux-androideabi-g++
@@ -0,0 +1,66 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+class CompilerWrapper():
+    def __init__(self, argv):
+        self.args = argv
+        self.execargs = []
+        self.real_compiler = None
+        self.argv0 = None
+        self.append_flags = []
+        self.prepend_flags = []
+        self.custom_flags = {
+            '--gomacc-path': None
+        }
+
+    def set_real_compiler(self):
+        """Find the real compiler with the absolute path."""
+        compiler_path = os.path.dirname(os.path.abspath(__file__))
+        if os.path.islink(__file__):
+            compiler = os.path.basename(os.readlink(__file__))
+        else:
+            compiler = os.path.basename(os.path.abspath(__file__))
+        self.real_compiler = os.path.join(
+            compiler_path,
+            "real-" + compiler)
+        self.argv0 = self.real_compiler
+
+    def process_gomacc_command(self):
+        """Launch the real compiler through gomacc if '--gomacc-path' is set."""
+        gomacc = self.custom_flags['--gomacc-path']
+        if gomacc and os.path.isfile(gomacc):
+            self.argv0 = gomacc
+            self.execargs += [gomacc]
+
+    def parse_custom_flags(self):
+        i = 0
+        args = []
+        while i < len(self.args):
+            if self.args[i] in self.custom_flags:
+                self.custom_flags[self.args[i]] = self.args[i + 1]
+                i = i + 2
+            else:
+                args.append(self.args[i])
+                i = i + 1
+        self.args = args
+
+    def add_flags(self):
+        self.args = self.prepend_flags + self.args + self.append_flags
+
+    def invoke_compiler(self):
+        self.set_real_compiler()
+        self.parse_custom_flags()
+        self.process_gomacc_command()
+        self.add_flags()
+        self.execargs += [self.real_compiler] + self.args
+        os.execv(self.argv0, self.execargs)
+
+
+def main(argv):
+    cw = CompilerWrapper(argv[1:])
+    cw.invoke_compiler()
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/bin/arm-linux-androideabi-gcc b/bin/arm-linux-androideabi-gcc
new file mode 100755
index 0000000..54944a2
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc
@@ -0,0 +1,66 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+class CompilerWrapper():
+    def __init__(self, argv):
+        self.args = argv
+        self.execargs = []
+        self.real_compiler = None
+        self.argv0 = None
+        self.append_flags = []
+        self.prepend_flags = []
+        self.custom_flags = {
+            '--gomacc-path': None
+        }
+
+    def set_real_compiler(self):
+        """Find the real compiler with the absolute path."""
+        compiler_path = os.path.dirname(os.path.abspath(__file__))
+        if os.path.islink(__file__):
+            compiler = os.path.basename(os.readlink(__file__))
+        else:
+            compiler = os.path.basename(os.path.abspath(__file__))
+        self.real_compiler = os.path.join(
+            compiler_path,
+            "real-" + compiler)
+        self.argv0 = self.real_compiler
+
+    def process_gomacc_command(self):
+        """Launch the real compiler through gomacc if '--gomacc-path' is set."""
+        gomacc = self.custom_flags['--gomacc-path']
+        if gomacc and os.path.isfile(gomacc):
+            self.argv0 = gomacc
+            self.execargs += [gomacc]
+
+    def parse_custom_flags(self):
+        i = 0
+        args = []
+        while i < len(self.args):
+            if self.args[i] in self.custom_flags:
+                self.custom_flags[self.args[i]] = self.args[i + 1]
+                i = i + 2
+            else:
+                args.append(self.args[i])
+                i = i + 1
+        self.args = args
+
+    def add_flags(self):
+        self.args = self.prepend_flags + self.args + self.append_flags
+
+    def invoke_compiler(self):
+        self.set_real_compiler()
+        self.parse_custom_flags()
+        self.process_gomacc_command()
+        self.add_flags()
+        self.execargs += [self.real_compiler] + self.args
+        os.execv(self.argv0, self.execargs)
+
+
+def main(argv):
+    cw = CompilerWrapper(argv[1:])
+    cw.invoke_compiler()
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/bin/arm-linux-androideabi-gcc-7.2 b/bin/arm-linux-androideabi-gcc-7.2
new file mode 120000
index 0000000..b8b9fad
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-7.2
@@ -0,0 +1 @@
+arm-linux-androideabi-gcc
\ No newline at end of file
diff --git a/bin/arm-linux-androideabi-gcc-7.2.0 b/bin/arm-linux-androideabi-gcc-7.2.0
new file mode 100755
index 0000000..cc344cb
Binary files /dev/null and b/bin/arm-linux-androideabi-gcc-7.2.0 differ
diff --git a/bin/arm-linux-androideabi-gcc-ar b/bin/arm-linux-androideabi-gcc-ar
new file mode 100755
index 0000000..e813b3a
Binary files /dev/null and b/bin/arm-linux-androideabi-gcc-ar differ
diff --git a/bin/arm-linux-androideabi-gcc-nm b/bin/arm-linux-androideabi-gcc-nm
new file mode 100755
index 0000000..99401ed
Binary files /dev/null and b/bin/arm-linux-androideabi-gcc-nm differ
diff --git a/bin/arm-linux-androideabi-gcc-ranlib b/bin/arm-linux-androideabi-gcc-ranlib
new file mode 100755
index 0000000..94af354
Binary files /dev/null and b/bin/arm-linux-androideabi-gcc-ranlib differ
diff --git a/bin/arm-linux-androideabi-gcov b/bin/arm-linux-androideabi-gcov
new file mode 100755
index 0000000..5b3bbcc
Binary files /dev/null and b/bin/arm-linux-androideabi-gcov differ
diff --git a/bin/arm-linux-androideabi-gcov-dump b/bin/arm-linux-androideabi-gcov-dump
new file mode 100755
index 0000000..e28a7dd
Binary files /dev/null and b/bin/arm-linux-androideabi-gcov-dump differ
diff --git a/bin/arm-linux-androideabi-gcov-tool b/bin/arm-linux-androideabi-gcov-tool
new file mode 100755
index 0000000..f6603b2
Binary files /dev/null and b/bin/arm-linux-androideabi-gcov-tool differ
diff --git
a/bin/arm-linux-androideabi-gprof b/bin/arm-linux-androideabi-gprof new file mode 100755 index 0000000..d713af5 Binary files /dev/null and b/bin/arm-linux-androideabi-gprof differ diff --git a/bin/arm-linux-androideabi-ld b/bin/arm-linux-androideabi-ld new file mode 120000 index 0000000..4194d24 --- /dev/null +++ b/bin/arm-linux-androideabi-ld @@ -0,0 +1 @@ +arm-linux-androideabi-ld.gold \ No newline at end of file diff --git a/bin/arm-linux-androideabi-ld.bfd b/bin/arm-linux-androideabi-ld.bfd new file mode 100755 index 0000000..c096e95 Binary files /dev/null and b/bin/arm-linux-androideabi-ld.bfd differ diff --git a/bin/arm-linux-androideabi-ld.gold b/bin/arm-linux-androideabi-ld.gold new file mode 100755 index 0000000..e9773f7 Binary files /dev/null and b/bin/arm-linux-androideabi-ld.gold differ diff --git a/bin/arm-linux-androideabi-nm b/bin/arm-linux-androideabi-nm new file mode 100755 index 0000000..5aae0f0 Binary files /dev/null and b/bin/arm-linux-androideabi-nm differ diff --git a/bin/arm-linux-androideabi-objcopy b/bin/arm-linux-androideabi-objcopy new file mode 100755 index 0000000..3670324 Binary files /dev/null and b/bin/arm-linux-androideabi-objcopy differ diff --git a/bin/arm-linux-androideabi-objdump b/bin/arm-linux-androideabi-objdump new file mode 100755 index 0000000..eb68242 Binary files /dev/null and b/bin/arm-linux-androideabi-objdump differ diff --git a/bin/arm-linux-androideabi-ranlib b/bin/arm-linux-androideabi-ranlib new file mode 100755 index 0000000..0bd79bd Binary files /dev/null and b/bin/arm-linux-androideabi-ranlib differ diff --git a/bin/arm-linux-androideabi-readelf b/bin/arm-linux-androideabi-readelf new file mode 100755 index 0000000..01db8e5 Binary files /dev/null and b/bin/arm-linux-androideabi-readelf differ diff --git a/bin/arm-linux-androideabi-size b/bin/arm-linux-androideabi-size new file mode 100755 index 0000000..044483f Binary files /dev/null and b/bin/arm-linux-androideabi-size differ diff --git a/bin/arm-linux-androideabi-strings b/bin/arm-linux-androideabi-strings new file mode 100755 index 0000000..d9af3b4 Binary files /dev/null and b/bin/arm-linux-androideabi-strings differ diff --git a/bin/arm-linux-androideabi-strip b/bin/arm-linux-androideabi-strip new file mode 100755 index 0000000..e9f7086 Binary files /dev/null and b/bin/arm-linux-androideabi-strip differ diff --git a/bin/real-arm-linux-androideabi-g++ b/bin/real-arm-linux-androideabi-g++ new file mode 100755 index 0000000..3f023c2 Binary files /dev/null and b/bin/real-arm-linux-androideabi-g++ differ diff --git a/bin/real-arm-linux-androideabi-gcc b/bin/real-arm-linux-androideabi-gcc new file mode 100755 index 0000000..cc344cb Binary files /dev/null and b/bin/real-arm-linux-androideabi-gcc differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/crtbegin.o b/lib/gcc/arm-linux-androideabi/7.2.0/crtbegin.o new file mode 100644 index 0000000..73fa404 Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/crtbegin.o differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/crtbeginS.o b/lib/gcc/arm-linux-androideabi/7.2.0/crtbeginS.o new file mode 100644 index 0000000..3e5fdff Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/crtbeginS.o differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/crtbeginT.o b/lib/gcc/arm-linux-androideabi/7.2.0/crtbeginT.o new file mode 100644 index 0000000..73fa404 Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/crtbeginT.o differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/crtend.o 
b/lib/gcc/arm-linux-androideabi/7.2.0/crtend.o new file mode 100644 index 0000000..ad874c2 Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/crtend.o differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/crtendS.o b/lib/gcc/arm-linux-androideabi/7.2.0/crtendS.o new file mode 100644 index 0000000..ad874c2 Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/crtendS.o differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/crtfastmath.o b/lib/gcc/arm-linux-androideabi/7.2.0/crtfastmath.o new file mode 100644 index 0000000..a39558b Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/crtfastmath.o differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/README b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/README new file mode 100644 index 0000000..7086a77 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/README @@ -0,0 +1,14 @@ +This README file is copied into the directory for GCC-only header files +when fixincludes is run by the makefile for GCC. + +Many of the files in this directory were automatically edited from the +standard system header files by the fixincludes process. They are +system-specific, and will not work on any other kind of system. They +are also not part of GCC. The reason we have to do this is because +GCC requires ANSI C headers and many vendors supply ANSI-incompatible +headers. + +Because this is an automated process, sometimes headers get "fixed" +that do not, strictly speaking, need a fix. As long as nothing is broken +by the process, it is just an unfortunate collateral inconvenience. +We would like to rectify it, if it is not "too inconvenient". diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/limits.h b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/limits.h new file mode 100644 index 0000000..743481d --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/limits.h @@ -0,0 +1,197 @@ +/* Copyright (C) 1992-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* This administrivia gets added to the beginning of limits.h + if the system has its own version of limits.h. */ + +/* We use _GCC_LIMITS_H_ because we want this not to match + any macros that the system's limits.h uses for its own purposes. */ +#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */ +#define _GCC_LIMITS_H_ + +#ifndef _LIBC_LIMITS_H_ +/* Use "..." so that we find syslimits.h only in this same directory. */ +#include "syslimits.h" +#endif +/* Copyright (C) 1991-2017 Free Software Foundation, Inc. + +This file is part of GCC.
+ +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +#ifndef _LIMITS_H___ +#define _LIMITS_H___ + +/* Number of bits in a `char'. */ +#undef CHAR_BIT +#define CHAR_BIT __CHAR_BIT__ + +/* Maximum length of a multibyte character. */ +#ifndef MB_LEN_MAX +#define MB_LEN_MAX 1 +#endif + +/* Minimum and maximum values a `signed char' can hold. */ +#undef SCHAR_MIN +#define SCHAR_MIN (-SCHAR_MAX - 1) +#undef SCHAR_MAX +#define SCHAR_MAX __SCHAR_MAX__ + +/* Maximum value an `unsigned char' can hold. (Minimum is 0). */ +#undef UCHAR_MAX +#if __SCHAR_MAX__ == __INT_MAX__ +# define UCHAR_MAX (SCHAR_MAX * 2U + 1U) +#else +# define UCHAR_MAX (SCHAR_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `char' can hold. */ +#ifdef __CHAR_UNSIGNED__ +# undef CHAR_MIN +# if __SCHAR_MAX__ == __INT_MAX__ +# define CHAR_MIN 0U +# else +# define CHAR_MIN 0 +# endif +# undef CHAR_MAX +# define CHAR_MAX UCHAR_MAX +#else +# undef CHAR_MIN +# define CHAR_MIN SCHAR_MIN +# undef CHAR_MAX +# define CHAR_MAX SCHAR_MAX +#endif + +/* Minimum and maximum values a `signed short int' can hold. */ +#undef SHRT_MIN +#define SHRT_MIN (-SHRT_MAX - 1) +#undef SHRT_MAX +#define SHRT_MAX __SHRT_MAX__ + +/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */ +#undef USHRT_MAX +#if __SHRT_MAX__ == __INT_MAX__ +# define USHRT_MAX (SHRT_MAX * 2U + 1U) +#else +# define USHRT_MAX (SHRT_MAX * 2 + 1) +#endif + +/* Minimum and maximum values a `signed int' can hold. */ +#undef INT_MIN +#define INT_MIN (-INT_MAX - 1) +#undef INT_MAX +#define INT_MAX __INT_MAX__ + +/* Maximum value an `unsigned int' can hold. (Minimum is 0). */ +#undef UINT_MAX +#define UINT_MAX (INT_MAX * 2U + 1U) + +/* Minimum and maximum values a `signed long int' can hold. + (Same as `int'). */ +#undef LONG_MIN +#define LONG_MIN (-LONG_MAX - 1L) +#undef LONG_MAX +#define LONG_MAX __LONG_MAX__ + +/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */ +#undef ULONG_MAX +#define ULONG_MAX (LONG_MAX * 2UL + 1UL) + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LLONG_MIN +# define LLONG_MIN (-LLONG_MAX - 1LL) +# undef LLONG_MAX +# define LLONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +# undef ULLONG_MAX +# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) +#endif + +#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__) +/* Minimum and maximum values a `signed long long int' can hold. */ +# undef LONG_LONG_MIN +# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL) +# undef LONG_LONG_MAX +# define LONG_LONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold.
(Minimum is 0). */ +# undef ULONG_LONG_MAX +# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL) +#endif + +#ifdef __STDC_WANT_IEC_60559_BFP_EXT__ +/* TS 18661-1 widths of integer types. */ +# undef CHAR_WIDTH +# define CHAR_WIDTH __SCHAR_WIDTH__ +# undef SCHAR_WIDTH +# define SCHAR_WIDTH __SCHAR_WIDTH__ +# undef UCHAR_WIDTH +# define UCHAR_WIDTH __SCHAR_WIDTH__ +# undef SHRT_WIDTH +# define SHRT_WIDTH __SHRT_WIDTH__ +# undef USHRT_WIDTH +# define USHRT_WIDTH __SHRT_WIDTH__ +# undef INT_WIDTH +# define INT_WIDTH __INT_WIDTH__ +# undef UINT_WIDTH +# define UINT_WIDTH __INT_WIDTH__ +# undef LONG_WIDTH +# define LONG_WIDTH __LONG_WIDTH__ +# undef ULONG_WIDTH +# define ULONG_WIDTH __LONG_WIDTH__ +# undef LLONG_WIDTH +# define LLONG_WIDTH __LONG_LONG_WIDTH__ +# undef ULLONG_WIDTH +# define ULLONG_WIDTH __LONG_LONG_WIDTH__ +#endif + +#endif /* _LIMITS_H___ */ +/* This administrivia gets added to the end of limits.h + if the system has its own version of limits.h. */ + +#else /* not _GCC_LIMITS_H_ */ + +#ifdef _GCC_NEXT_LIMITS_H +#include_next <limits.h> /* recurse down to the real one */ +#endif + +#endif /* not _GCC_LIMITS_H_ */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/linux/a.out.h b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/linux/a.out.h new file mode 100644 index 0000000..6a014db --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/linux/a.out.h @@ -0,0 +1,229 @@ +/* DO NOT EDIT THIS FILE. + + It has been auto-edited by fixincludes from: + + "/tmp/9241679f33ac5c75d523b183283db2ca/sysroot/usr/include/linux/a.out.h" + + This had to be done to correct non-standard usages in the + original, manufacturer supplied header file. */ + +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information.
+ *** + **************************************************************************** + ****************************************************************************/ +#ifndef __A_OUT_GNU_H__ +#define __A_OUT_GNU_H__ + +#define __GNU_EXEC_MACROS__ + +#ifndef __STRUCT_EXEC_OVERRIDE__ + +#include <asm/a.out.h> + +#endif + +enum machine_type { +#ifdef M_OLDSUN2 + M__OLDSUN2 = M_OLDSUN2, +#else + M_OLDSUN2 = 0, +#endif +#ifdef M_68010 + M__68010 = M_68010, +#else + M_68010 = 1, +#endif +#ifdef M_68020 + M__68020 = M_68020, +#else + M_68020 = 2, +#endif +#ifdef M_SPARC + M__SPARC = M_SPARC, +#else + M_SPARC = 3, +#endif + + M_386 = 100, + M_MIPS1 = 151, + M_MIPS2 = 152 +}; + +#ifndef N_MAGIC +#define N_MAGIC(exec) ((exec).a_info & 0xffff) +#endif +#define N_MACHTYPE(exec) ((enum machine_type)(((exec).a_info >> 16) & 0xff)) +#define N_FLAGS(exec) (((exec).a_info >> 24) & 0xff) +#define N_SET_INFO(exec, magic, type, flags) ((exec).a_info = ((magic) & 0xffff) | (((int)(type) & 0xff) << 16) | (((flags) & 0xff) << 24)) +#define N_SET_MAGIC(exec, magic) ((exec).a_info = (((exec).a_info & 0xffff0000) | ((magic) & 0xffff))) + +#define N_SET_MACHTYPE(exec, machtype) ((exec).a_info = ((exec).a_info&0xff00ffff) | ((((int)(machtype))&0xff) << 16)) + +#define N_SET_FLAGS(exec, flags) ((exec).a_info = ((exec).a_info&0x00ffffff) | (((flags) & 0xff) << 24)) + +#define OMAGIC 0407 + +#define NMAGIC 0410 + +#define ZMAGIC 0413 + +#define QMAGIC 0314 + +#define CMAGIC 0421 + +#ifndef N_BADMAG +#define N_BADMAG(x) (N_MAGIC(x) != OMAGIC && N_MAGIC(x) != NMAGIC && N_MAGIC(x) != ZMAGIC && N_MAGIC(x) != QMAGIC) +#endif + +#define _N_HDROFF(x) (1024 - sizeof (struct exec)) + +#ifndef N_TXTOFF +#define N_TXTOFF(x) (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : (N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec))) +#endif + +#ifndef N_DATOFF +#define N_DATOFF(x) (N_TXTOFF(x) + (x).a_text) +#endif + +#ifndef N_TRELOFF +#define N_TRELOFF(x) (N_DATOFF(x) + (x).a_data) +#endif + +#ifndef N_DRELOFF +#define N_DRELOFF(x) (N_TRELOFF(x) + N_TRSIZE(x)) +#endif + +#ifndef N_SYMOFF +#define N_SYMOFF(x) (N_DRELOFF(x) + N_DRSIZE(x)) +#endif + +#ifndef N_STROFF +#define N_STROFF(x) (N_SYMOFF(x) + N_SYMSIZE(x)) +#endif + +#ifndef N_TXTADDR +#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0) +#endif + +#if defined(vax) || defined(hp300) || defined(pyr) +#define SEGMENT_SIZE page_size +#endif +#ifdef sony +#define SEGMENT_SIZE 0x2000 +#endif +#ifdef is68k +#define SEGMENT_SIZE 0x20000 +#endif +#if defined(m68k) && defined(PORTAR) +#define PAGE_SIZE 0x400 +#define SEGMENT_SIZE PAGE_SIZE +#endif + +#ifdef __linux__ +#include <asm/page.h> +#if defined(__i386__) || defined(__mc68000__) +#define SEGMENT_SIZE 1024 +#else +#ifndef SEGMENT_SIZE +#define SEGMENT_SIZE PAGE_SIZE +#endif +#endif +#endif + +#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE) + +#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text) + +#ifndef N_DATADDR +#define N_DATADDR(x) (N_MAGIC(x)==OMAGIC?
(_N_TXTENDADDR(x)) : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x)))) +#endif + +#ifndef N_BSSADDR +#define N_BSSADDR(x) (N_DATADDR(x) + (x).a_data) +#endif + +#ifndef N_NLIST_DECLARED +struct nlist { + union { + char *n_name; + struct nlist *n_next; + long n_strx; + } n_un; + unsigned char n_type; + char n_other; + short n_desc; + unsigned long n_value; +}; +#endif + +#ifndef N_UNDF +#define N_UNDF 0 +#endif +#ifndef N_ABS +#define N_ABS 2 +#endif +#ifndef N_TEXT +#define N_TEXT 4 +#endif +#ifndef N_DATA +#define N_DATA 6 +#endif +#ifndef N_BSS +#define N_BSS 8 +#endif +#ifndef N_FN +#define N_FN 15 +#endif + +#ifndef N_EXT +#define N_EXT 1 +#endif +#ifndef N_TYPE +#define N_TYPE 036 +#endif +#ifndef N_STAB +#define N_STAB 0340 +#endif + +#define N_INDR 0xa + +#define N_SETA 0x14 +#define N_SETT 0x16 +#define N_SETD 0x18 +#define N_SETB 0x1A + +#define N_SETV 0x1C + +#ifndef N_RELOCATION_INFO_DECLARED + +struct relocation_info +{ + + int r_address; + + unsigned int r_symbolnum:24; + + unsigned int r_pcrel:1; + + unsigned int r_length:2; + + unsigned int r_extern:1; + +#ifdef NS32K + unsigned r_bsr:1; + unsigned r_disp:1; + unsigned r_pad:2; +#else + unsigned int r_pad:4; +#endif +}; +#endif + +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/syslimits.h b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/syslimits.h new file mode 100644 index 0000000..a362802 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include-fixed/syslimits.h @@ -0,0 +1,8 @@ +/* syslimits.h stands for the system's own limits.h file. + If we can use it ok unmodified, then we install this text. + If fixincludes fixes it, then the fixed version is installed + instead of this text. */ + +#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */ +#include_next <limits.h> +#undef _GCC_NEXT_LIMITS_H diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_acle.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_acle.h new file mode 100644 index 0000000..972e28e --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_acle.h @@ -0,0 +1,241 @@ +/* ARM Non-NEON ACLE intrinsics include file. + + Copyright (C) 2013-2017 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>.
*/ + +#ifndef _GCC_ARM_ACLE_H +#define _GCC_ARM_ACLE_H + +#include <stdint.h> +#ifdef __cplusplus +extern "C" { +#endif + +#if (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4 +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_cdp (const unsigned int __coproc, const unsigned int __opc1, + const unsigned int __CRd, const unsigned int __CRn, + const unsigned int __CRm, const unsigned int __opc2) +{ + return __builtin_arm_cdp (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_ldc (const unsigned int __coproc, const unsigned int __CRd, + const void * __p) +{ + return __builtin_arm_ldc (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_ldcl (const unsigned int __coproc, const unsigned int __CRd, + const void * __p) +{ + return __builtin_arm_ldcl (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_stc (const unsigned int __coproc, const unsigned int __CRd, + void * __p) +{ + return __builtin_arm_stc (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_stcl (const unsigned int __coproc, const unsigned int __CRd, + void * __p) +{ + return __builtin_arm_stcl (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_mcr (const unsigned int __coproc, const unsigned int __opc1, + uint32_t __value, const unsigned int __CRn, const unsigned int __CRm, + const unsigned int __opc2) +{ + return __builtin_arm_mcr (__coproc, __opc1, __value, __CRn, __CRm, __opc2); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__arm_mrc (const unsigned int __coproc, const unsigned int __opc1, + const unsigned int __CRn, const unsigned int __CRm, + const unsigned int __opc2) +{ + return __builtin_arm_mrc (__coproc, __opc1, __CRn, __CRm, __opc2); +} +#if __ARM_ARCH >= 5 +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_cdp2 (const unsigned int __coproc, const unsigned int __opc1, + const unsigned int __CRd, const unsigned int __CRn, + const unsigned int __CRm, const unsigned int __opc2) +{ + return __builtin_arm_cdp2 (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_ldc2 (const unsigned int __coproc, const unsigned int __CRd, + const void * __p) +{ + return __builtin_arm_ldc2 (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_ldc2l (const unsigned int __coproc, const unsigned int __CRd, + const void * __p) +{ + return __builtin_arm_ldc2l (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_stc2 (const unsigned int __coproc, const unsigned int __CRd, + void * __p) +{ + return __builtin_arm_stc2 (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_stc2l (const unsigned int __coproc, const unsigned int __CRd, + void * __p) +{ + return __builtin_arm_stc2l (__coproc, __CRd, __p); +} + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_mcr2 (const unsigned int __coproc, const unsigned int __opc1, + uint32_t __value, const unsigned int __CRn, + const unsigned int __CRm, const unsigned int __opc2) +{ + return __builtin_arm_mcr2 (__coproc, __opc1, __value, __CRn, __CRm, __opc2); +} +
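+/* Illustrative usage (an editorial sketch, not part of the imported
+   header): on an ARMv7-A core the Main ID register sits in CP15 with
+   CRn = c0, CRm = c0 and opc1 = opc2 = 0, so it can be read through
+   the __arm_mrc intrinsic above as
+
+     uint32_t __midr = __arm_mrc (15, 0, 0, 0, 0);
+
+   which the compiler expands to a single MRC instruction.  */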
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__arm_mrc2 (const unsigned int __coproc, const unsigned int __opc1, + const unsigned int __CRn, const unsigned int __CRm, + const unsigned int __opc2) +{ + return __builtin_arm_mrc2 (__coproc, __opc1, __CRn, __CRm, __opc2); +} + +#if __ARM_ARCH >= 6 || defined (__ARM_ARCH_5TE__) + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_mcrr (const unsigned int __coproc, const unsigned int __opc1, + uint64_t __value, const unsigned int __CRm) +{ + return __builtin_arm_mcrr (__coproc, __opc1, __value, __CRm); +} + +__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) +__arm_mrrc (const unsigned int __coproc, const unsigned int __opc1, + const unsigned int __CRm) +{ + return __builtin_arm_mrrc (__coproc, __opc1, __CRm); +} + +#if __ARM_ARCH >= 6 + +__extension__ static __inline void __attribute__ ((__always_inline__)) +__arm_mcrr2 (const unsigned int __coproc, const unsigned int __opc1, + uint64_t __value, const unsigned int __CRm) +{ + return __builtin_arm_mcrr2 (__coproc, __opc1, __value, __CRm); +} + +__extension__ static __inline uint64_t __attribute__ ((__always_inline__)) +__arm_mrrc2 (const unsigned int __coproc, const unsigned int __opc1, + const unsigned int __CRm) +{ + return __builtin_arm_mrrc2 (__coproc, __opc1, __CRm); +} +#endif /* __ARM_ARCH >= 6. */ +#endif /* __ARM_ARCH >= 6 || defined (__ARM_ARCH_5TE__). */ +#endif /* __ARM_ARCH >= 5. */ +#endif /* (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4. */ + +#ifdef __ARM_FEATURE_CRC32 +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32b (uint32_t __a, uint8_t __b) +{ + return __builtin_arm_crc32b (__a, __b); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32h (uint32_t __a, uint16_t __b) +{ + return __builtin_arm_crc32h (__a, __b); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32w (uint32_t __a, uint32_t __b) +{ + return __builtin_arm_crc32w (__a, __b); +} + +#ifdef __ARM_32BIT_STATE +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32d (uint32_t __a, uint64_t __b) +{ + uint32_t __d; + + __d = __crc32w (__crc32w (__a, __b & 0xffffffffULL), __b >> 32); + return __d; +} +#endif + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32cb (uint32_t __a, uint8_t __b) +{ + return __builtin_arm_crc32cb (__a, __b); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32ch (uint32_t __a, uint16_t __b) +{ + return __builtin_arm_crc32ch (__a, __b); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32cw (uint32_t __a, uint32_t __b) +{ + return __builtin_arm_crc32cw (__a, __b); +} + +#ifdef __ARM_32BIT_STATE +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +__crc32cd (uint32_t __a, uint64_t __b) +{ + uint32_t __d; + + __d = __crc32cw (__crc32cw (__a, __b & 0xffffffffULL), __b >> 32); + return __d; +} +#endif + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_cmse.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_cmse.h new file mode 100644 index 0000000..8fde273 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_cmse.h @@ -0,0 +1,199 @@ +/* ARMv8-M Secure Extensions intrinsics include file. + + Copyright (C) 2015-2017 Free Software Foundation, Inc. + Contributed by ARM Ltd. 
+ + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + + +#ifndef _GCC_ARM_CMSE_H +#define _GCC_ARM_CMSE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if __ARM_FEATURE_CMSE & 1 + +#include <stddef.h> +#include <stdint.h> + +#ifdef __ARM_BIG_ENDIAN + +typedef union { + struct cmse_address_info { +#if __ARM_FEATURE_CMSE & 2 + unsigned idau_region:8; + unsigned idau_region_valid:1; + unsigned secure:1; + unsigned nonsecure_readwrite_ok:1; + unsigned nonsecure_read_ok:1; +#else + unsigned :12; +#endif + unsigned readwrite_ok:1; + unsigned read_ok:1; +#if __ARM_FEATURE_CMSE & 2 + unsigned sau_region_valid:1; +#else + unsigned :1; +#endif + unsigned mpu_region_valid:1; +#if __ARM_FEATURE_CMSE & 2 + unsigned sau_region:8; +#else + unsigned :8; +#endif + unsigned mpu_region:8; + } flags; + unsigned value; +} cmse_address_info_t; + +#else + +typedef union { + struct cmse_address_info { + unsigned mpu_region:8; +#if __ARM_FEATURE_CMSE & 2 + unsigned sau_region:8; +#else + unsigned :8; +#endif + unsigned mpu_region_valid:1; +#if __ARM_FEATURE_CMSE & 2 + unsigned sau_region_valid:1; +#else + unsigned :1; +#endif + unsigned read_ok:1; + unsigned readwrite_ok:1; +#if __ARM_FEATURE_CMSE & 2 + unsigned nonsecure_read_ok:1; + unsigned nonsecure_readwrite_ok:1; + unsigned secure:1; + unsigned idau_region_valid:1; + unsigned idau_region:8; +#else + unsigned :12; +#endif + } flags; + unsigned value; +} cmse_address_info_t; + +#endif /* __ARM_BIG_ENDIAN */ + +#define cmse_TT_fptr(p) (__cmse_TT_fptr ((__cmse_fptr)(p))) + +typedef void (*__cmse_fptr)(void); + +#define __CMSE_TT_ASM(flags) \ +{ \ + cmse_address_info_t __result; \ + __asm__ ("tt" # flags " %0,%1" \ + : "=r"(__result) \ + : "r"(__p) \ + : "memory"); \ + return __result; \ +} + +__extension__ static __inline __attribute__ ((__always_inline__)) +cmse_address_info_t +__cmse_TT_fptr (__cmse_fptr __p) +__CMSE_TT_ASM () + +__extension__ static __inline __attribute__ ((__always_inline__)) +cmse_address_info_t +cmse_TT (void *__p) +__CMSE_TT_ASM () + +#define cmse_TTT_fptr(p) (__cmse_TTT_fptr ((__cmse_fptr)(p))) + +__extension__ static __inline __attribute__ ((__always_inline__)) +cmse_address_info_t +__cmse_TTT_fptr (__cmse_fptr __p) +__CMSE_TT_ASM (t) + +__extension__ static __inline __attribute__ ((__always_inline__)) +cmse_address_info_t +cmse_TTT (void *__p) +__CMSE_TT_ASM (t) + +#if __ARM_FEATURE_CMSE & 2 + +#define cmse_TTA_fptr(p) (__cmse_TTA_fptr ((__cmse_fptr)(p))) + +__extension__ static __inline __attribute__ ((__always_inline__)) +cmse_address_info_t +__cmse_TTA_fptr (__cmse_fptr __p) +__CMSE_TT_ASM (a) + +__extension__ static __inline __attribute__ ((__always_inline__)) +cmse_address_info_t +cmse_TTA (void *__p) +__CMSE_TT_ASM
(a) + +#define cmse_TTAT_fptr(p) (__cmse_TTAT_fptr ((__cmse_fptr)(p))) + +__extension__ static __inline cmse_address_info_t +__attribute__ ((__always_inline__)) +__cmse_TTAT_fptr (__cmse_fptr __p) +__CMSE_TT_ASM (at) + +__extension__ static __inline cmse_address_info_t +__attribute__ ((__always_inline__)) +cmse_TTAT (void *__p) +__CMSE_TT_ASM (at) + +/* FIXME: diagnose use outside cmse_nonsecure_entry functions. */ +__extension__ static __inline int __attribute__ ((__always_inline__)) +cmse_nonsecure_caller (void) +{ + return __builtin_arm_cmse_nonsecure_caller (); +} + +#define CMSE_AU_NONSECURE 2 +#define CMSE_MPU_NONSECURE 16 +#define CMSE_NONSECURE 18 + +#define cmse_nsfptr_create(p) ((typeof ((p))) ((intptr_t) (p) & ~1)) + +#define cmse_is_nsfptr(p) (!((intptr_t) (p) & 1)) + +#endif /* __ARM_FEATURE_CMSE & 2 */ + +#define CMSE_MPU_UNPRIV 4 +#define CMSE_MPU_READWRITE 1 +#define CMSE_MPU_READ 8 + +__extension__ void * +cmse_check_address_range (void *, size_t, int); + +#define cmse_check_pointed_object(p, f) \ + ((typeof ((p))) cmse_check_address_range ((p), sizeof (*(p)), (f))) + +#endif /* __ARM_FEATURE_CMSE & 1 */ + +#ifdef __cplusplus +} +#endif + +#endif /* _GCC_ARM_CMSE_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_fp16.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_fp16.h new file mode 100644 index 0000000..36d1d03 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_fp16.h @@ -0,0 +1,255 @@ +/* ARM FP16 intrinsics include file. + + Copyright (C) 2016-2017 Free Software Foundation, Inc. + Contributed by ARM Ltd. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _GCC_ARM_FP16_H +#define _GCC_ARM_FP16_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +#include <stdint.h> + +/* Intrinsics for FP16 instructions.
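+
+   Illustrative usage (an editorial sketch, not part of the imported
+   header -- __acc, __x and __y are hypothetical operands): with the
+   scalar FP16 arithmetic extension available, a fused half-precision
+   multiply-add can be written as
+
+     float16_t __r = vfmah_f16 (__acc, __x, __y);
+
+   which computes __acc + __x * __y in a single rounding step, while
+   vaddh_f16, vmulh_f16 and vsubh_f16 below give the plain unfused forms.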
*/ +#pragma GCC push_options +#pragma GCC target ("fpu=fp-armv8") + +#if defined (__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) + +typedef __fp16 float16_t; + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vabsh_f16 (float16_t __a) +{ + return __builtin_neon_vabshf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vaddh_f16 (float16_t __a, float16_t __b) +{ + return __a + __b; +} + +__extension__ static __inline int32_t __attribute__ ((__always_inline__)) +vcvtah_s32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtahssi (__a); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +vcvtah_u32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtahusi (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vcvth_f16_s32 (int32_t __a) +{ + return __builtin_neon_vcvthshf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vcvth_f16_u32 (uint32_t __a) +{ + return __builtin_neon_vcvthuhf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vcvth_n_f16_s32 (int32_t __a, const int __b) +{ + return __builtin_neon_vcvths_nhf (__a, __b); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vcvth_n_f16_u32 (uint32_t __a, const int __b) +{ + return __builtin_neon_vcvthu_nhf ((int32_t)__a, __b); +} + +__extension__ static __inline int32_t __attribute__ ((__always_inline__)) +vcvth_n_s32_f16 (float16_t __a, const int __b) +{ + return __builtin_neon_vcvths_nsi (__a, __b); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +vcvth_n_u32_f16 (float16_t __a, const int __b) +{ + return (uint32_t)__builtin_neon_vcvthu_nsi (__a, __b); +} + +__extension__ static __inline int32_t __attribute__ ((__always_inline__)) +vcvth_s32_f16 (float16_t __a) +{ + return __builtin_neon_vcvthssi (__a); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +vcvth_u32_f16 (float16_t __a) +{ + return __builtin_neon_vcvthusi (__a); +} + +__extension__ static __inline int32_t __attribute__ ((__always_inline__)) +vcvtmh_s32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtmhssi (__a); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +vcvtmh_u32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtmhusi (__a); +} + +__extension__ static __inline int32_t __attribute__ ((__always_inline__)) +vcvtnh_s32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtnhssi (__a); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +vcvtnh_u32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtnhusi (__a); +} + +__extension__ static __inline int32_t __attribute__ ((__always_inline__)) +vcvtph_s32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtphssi (__a); +} + +__extension__ static __inline uint32_t __attribute__ ((__always_inline__)) +vcvtph_u32_f16 (float16_t __a) +{ + return __builtin_neon_vcvtphusi (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vdivh_f16 (float16_t __a, float16_t __b) +{ + return __a / __b; +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vfmah_f16 (float16_t __a, float16_t __b, float16_t __c) +{ + return __builtin_neon_vfmahf (__a, __b, __c); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vfmsh_f16 (float16_t __a, float16_t __b, float16_t __c) +{ + return 
__builtin_neon_vfmshf (__a, __b, __c); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vmaxnmh_f16 (float16_t __a, float16_t __b) +{ + return __builtin_neon_vmaxnmhf (__a, __b); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vminnmh_f16 (float16_t __a, float16_t __b) +{ + return __builtin_neon_vminnmhf (__a, __b); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vmulh_f16 (float16_t __a, float16_t __b) +{ + return __a * __b; +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vnegh_f16 (float16_t __a) +{ + return - __a; +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vrndah_f16 (float16_t __a) +{ + return __builtin_neon_vrndahf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vrndh_f16 (float16_t __a) +{ + return __builtin_neon_vrndhf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vrndih_f16 (float16_t __a) +{ + return __builtin_neon_vrndihf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vrndmh_f16 (float16_t __a) +{ + return __builtin_neon_vrndmhf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vrndnh_f16 (float16_t __a) +{ + return __builtin_neon_vrndnhf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vrndph_f16 (float16_t __a) +{ + return __builtin_neon_vrndphf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vrndxh_f16 (float16_t __a) +{ + return __builtin_neon_vrndxhf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vsqrth_f16 (float16_t __a) +{ + return __builtin_neon_vsqrthf (__a); +} + +__extension__ static __inline float16_t __attribute__ ((__always_inline__)) +vsubh_f16 (float16_t __a, float16_t __b) +{ + return __a - __b; +} + +#endif /* __ARM_FEATURE_FP16_SCALAR_ARITHMETIC */ +#pragma GCC pop_options + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_neon.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_neon.h new file mode 100644 index 0000000..f81d77e --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/arm_neon.h @@ -0,0 +1,18020 @@ +/* ARM NEON intrinsics include file. + + Copyright (C) 2006-2017 Free Software Foundation, Inc. + Contributed by CodeSourcery. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>.
*/ + +#ifndef _GCC_ARM_NEON_H +#define _GCC_ARM_NEON_H 1 + +#ifndef __ARM_FP +#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard" +#else + +#pragma GCC push_options +#pragma GCC target ("fpu=neon") + +#ifdef __cplusplus +extern "C" { +#endif + +#include <arm_fp16.h> +#include <stdint.h> + +typedef __simd64_int8_t int8x8_t; +typedef __simd64_int16_t int16x4_t; +typedef __simd64_int32_t int32x2_t; +typedef __builtin_neon_di int64x1_t; +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef __fp16 float16_t; +typedef __simd64_float16_t float16x4_t; +#endif +typedef __simd64_float32_t float32x2_t; +typedef __simd64_poly8_t poly8x8_t; +typedef __simd64_poly16_t poly16x4_t; +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +typedef __builtin_neon_poly64 poly64x1_t; +#pragma GCC pop_options +typedef __simd64_uint8_t uint8x8_t; +typedef __simd64_uint16_t uint16x4_t; +typedef __simd64_uint32_t uint32x2_t; +typedef __builtin_neon_udi uint64x1_t; + +typedef __simd128_int8_t int8x16_t; +typedef __simd128_int16_t int16x8_t; +typedef __simd128_int32_t int32x4_t; +typedef __simd128_int64_t int64x2_t; +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef __simd128_float16_t float16x8_t; +#endif +typedef __simd128_float32_t float32x4_t; +typedef __simd128_poly8_t poly8x16_t; +typedef __simd128_poly16_t poly16x8_t; +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +typedef __builtin_neon_poly64 poly64x2_t __attribute__ ((__vector_size__ (16))); +#pragma GCC pop_options + +typedef __simd128_uint8_t uint8x16_t; +typedef __simd128_uint16_t uint16x8_t; +typedef __simd128_uint32_t uint32x4_t; +typedef __simd128_uint64_t uint64x2_t; + +typedef float float32_t; + +/* The Poly types are user visible and live in their own world, + keep them that way.
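+   (Editorial note, not part of the imported header: poly values use
+   carry-less GF(2) arithmetic -- addition is bitwise XOR and multiply
+   propagates no carries -- so an 8-bit polynomial product is formed
+   with the dedicated intrinsic, e.g. poly8x8_t __r = vmul_p8 (__a, __b);
+   rather than with the ordinary integer operators.)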
*/ +typedef __builtin_neon_poly8 poly8_t; +typedef __builtin_neon_poly16 poly16_t; +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +typedef __builtin_neon_poly64 poly64_t; +typedef __builtin_neon_poly128 poly128_t; +#pragma GCC pop_options + +typedef struct int8x8x2_t +{ + int8x8_t val[2]; +} int8x8x2_t; + +typedef struct int8x16x2_t +{ + int8x16_t val[2]; +} int8x16x2_t; + +typedef struct int16x4x2_t +{ + int16x4_t val[2]; +} int16x4x2_t; + +typedef struct int16x8x2_t +{ + int16x8_t val[2]; +} int16x8x2_t; + +typedef struct int32x2x2_t +{ + int32x2_t val[2]; +} int32x2x2_t; + +typedef struct int32x4x2_t +{ + int32x4_t val[2]; +} int32x4x2_t; + +typedef struct int64x1x2_t +{ + int64x1_t val[2]; +} int64x1x2_t; + +typedef struct int64x2x2_t +{ + int64x2_t val[2]; +} int64x2x2_t; + +typedef struct uint8x8x2_t +{ + uint8x8_t val[2]; +} uint8x8x2_t; + +typedef struct uint8x16x2_t +{ + uint8x16_t val[2]; +} uint8x16x2_t; + +typedef struct uint16x4x2_t +{ + uint16x4_t val[2]; +} uint16x4x2_t; + +typedef struct uint16x8x2_t +{ + uint16x8_t val[2]; +} uint16x8x2_t; + +typedef struct uint32x2x2_t +{ + uint32x2_t val[2]; +} uint32x2x2_t; + +typedef struct uint32x4x2_t +{ + uint32x4_t val[2]; +} uint32x4x2_t; + +typedef struct uint64x1x2_t +{ + uint64x1_t val[2]; +} uint64x1x2_t; + +typedef struct uint64x2x2_t +{ + uint64x2_t val[2]; +} uint64x2x2_t; + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef struct float16x4x2_t +{ + float16x4_t val[2]; +} float16x4x2_t; +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef struct float16x8x2_t +{ + float16x8_t val[2]; +} float16x8x2_t; +#endif + +typedef struct float32x2x2_t +{ + float32x2_t val[2]; +} float32x2x2_t; + +typedef struct float32x4x2_t +{ + float32x4_t val[2]; +} float32x4x2_t; + +typedef struct poly8x8x2_t +{ + poly8x8_t val[2]; +} poly8x8x2_t; + +typedef struct poly8x16x2_t +{ + poly8x16_t val[2]; +} poly8x16x2_t; + +typedef struct poly16x4x2_t +{ + poly16x4_t val[2]; +} poly16x4x2_t; + +typedef struct poly16x8x2_t +{ + poly16x8_t val[2]; +} poly16x8x2_t; + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +typedef struct poly64x1x2_t +{ + poly64x1_t val[2]; +} poly64x1x2_t; + + +typedef struct poly64x2x2_t +{ + poly64x2_t val[2]; +} poly64x2x2_t; +#pragma GCC pop_options + + +typedef struct int8x8x3_t +{ + int8x8_t val[3]; +} int8x8x3_t; + +typedef struct int8x16x3_t +{ + int8x16_t val[3]; +} int8x16x3_t; + +typedef struct int16x4x3_t +{ + int16x4_t val[3]; +} int16x4x3_t; + +typedef struct int16x8x3_t +{ + int16x8_t val[3]; +} int16x8x3_t; + +typedef struct int32x2x3_t +{ + int32x2_t val[3]; +} int32x2x3_t; + +typedef struct int32x4x3_t +{ + int32x4_t val[3]; +} int32x4x3_t; + +typedef struct int64x1x3_t +{ + int64x1_t val[3]; +} int64x1x3_t; + +typedef struct int64x2x3_t +{ + int64x2_t val[3]; +} int64x2x3_t; + +typedef struct uint8x8x3_t +{ + uint8x8_t val[3]; +} uint8x8x3_t; + +typedef struct uint8x16x3_t +{ + uint8x16_t val[3]; +} uint8x16x3_t; + +typedef struct uint16x4x3_t +{ + uint16x4_t val[3]; +} uint16x4x3_t; + +typedef struct uint16x8x3_t +{ + uint16x8_t val[3]; +} uint16x8x3_t; + +typedef struct uint32x2x3_t +{ + uint32x2_t val[3]; +} uint32x2x3_t; + +typedef struct uint32x4x3_t +{ + uint32x4_t val[3]; +} uint32x4x3_t; + +typedef struct uint64x1x3_t +{ + uint64x1_t val[3]; +} uint64x1x3_t; + +typedef struct uint64x2x3_t +{ + uint64x2_t val[3]; +} uint64x2x3_t; + +#if defined 
(__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef struct float16x4x3_t +{ + float16x4_t val[3]; +} float16x4x3_t; +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef struct float16x8x3_t +{ + float16x8_t val[3]; +} float16x8x3_t; +#endif + +typedef struct float32x2x3_t +{ + float32x2_t val[3]; +} float32x2x3_t; + +typedef struct float32x4x3_t +{ + float32x4_t val[3]; +} float32x4x3_t; + +typedef struct poly8x8x3_t +{ + poly8x8_t val[3]; +} poly8x8x3_t; + +typedef struct poly8x16x3_t +{ + poly8x16_t val[3]; +} poly8x16x3_t; + +typedef struct poly16x4x3_t +{ + poly16x4_t val[3]; +} poly16x4x3_t; + +typedef struct poly16x8x3_t +{ + poly16x8_t val[3]; +} poly16x8x3_t; + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +typedef struct poly64x1x3_t +{ + poly64x1_t val[3]; +} poly64x1x3_t; + + +typedef struct poly64x2x3_t +{ + poly64x2_t val[3]; +} poly64x2x3_t; +#pragma GCC pop_options + + +typedef struct int8x8x4_t +{ + int8x8_t val[4]; +} int8x8x4_t; + +typedef struct int8x16x4_t +{ + int8x16_t val[4]; +} int8x16x4_t; + +typedef struct int16x4x4_t +{ + int16x4_t val[4]; +} int16x4x4_t; + +typedef struct int16x8x4_t +{ + int16x8_t val[4]; +} int16x8x4_t; + +typedef struct int32x2x4_t +{ + int32x2_t val[4]; +} int32x2x4_t; + +typedef struct int32x4x4_t +{ + int32x4_t val[4]; +} int32x4x4_t; + +typedef struct int64x1x4_t +{ + int64x1_t val[4]; +} int64x1x4_t; + +typedef struct int64x2x4_t +{ + int64x2_t val[4]; +} int64x2x4_t; + +typedef struct uint8x8x4_t +{ + uint8x8_t val[4]; +} uint8x8x4_t; + +typedef struct uint8x16x4_t +{ + uint8x16_t val[4]; +} uint8x16x4_t; + +typedef struct uint16x4x4_t +{ + uint16x4_t val[4]; +} uint16x4x4_t; + +typedef struct uint16x8x4_t +{ + uint16x8_t val[4]; +} uint16x8x4_t; + +typedef struct uint32x2x4_t +{ + uint32x2_t val[4]; +} uint32x2x4_t; + +typedef struct uint32x4x4_t +{ + uint32x4_t val[4]; +} uint32x4x4_t; + +typedef struct uint64x1x4_t +{ + uint64x1_t val[4]; +} uint64x1x4_t; + +typedef struct uint64x2x4_t +{ + uint64x2_t val[4]; +} uint64x2x4_t; + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef struct float16x4x4_t +{ + float16x4_t val[4]; +} float16x4x4_t; +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +typedef struct float16x8x4_t +{ + float16x8_t val[4]; +} float16x8x4_t; +#endif + +typedef struct float32x2x4_t +{ + float32x2_t val[4]; +} float32x2x4_t; + +typedef struct float32x4x4_t +{ + float32x4_t val[4]; +} float32x4x4_t; + +typedef struct poly8x8x4_t +{ + poly8x8_t val[4]; +} poly8x8x4_t; + +typedef struct poly8x16x4_t +{ + poly8x16_t val[4]; +} poly8x16x4_t; + +typedef struct poly16x4x4_t +{ + poly16x4_t val[4]; +} poly16x4x4_t; + +typedef struct poly16x8x4_t +{ + poly16x8_t val[4]; +} poly16x8x4_t; + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +typedef struct poly64x1x4_t +{ + poly64x1_t val[4]; +} poly64x1x4_t; + + +typedef struct poly64x2x4_t +{ + poly64x2_t val[4]; +} poly64x2x4_t; +#pragma GCC pop_options + +/* vadd */ +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int32x2_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vadd_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_f32 (float32x2_t __a, float32x2_t __b) +{ +#ifdef __FAST_MATH__ + return __a + __b; +#else + return (float32x2_t) __builtin_neon_vaddv2sf (__a, __b); +#endif +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_s64 (int64x1_t __a, int64x1_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_s64 (int64x2_t __a, int64x2_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_f32 (float32x4_t __a, float32x4_t __b) +{ +#ifdef __FAST_MATH__ + return __a + __b; +#else + return (float32x4_t) __builtin_neon_vaddv4sf (__a, __b); +#endif +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return __a + __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddl_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int16x8_t)__builtin_neon_vaddlsv8qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddl_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int32x4_t)__builtin_neon_vaddlsv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vaddl_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int64x2_t)__builtin_neon_vaddlsv2si (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddl_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vaddluv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddl_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vaddluv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddl_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vaddluv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddw_s8 (int16x8_t __a, int8x8_t __b) +{ + return (int16x8_t)__builtin_neon_vaddwsv8qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddw_s16 (int32x4_t __a, int16x4_t __b) +{ + return (int32x4_t)__builtin_neon_vaddwsv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddw_s32 (int64x2_t __a, int32x2_t __b) +{ + return (int64x2_t)__builtin_neon_vaddwsv2si (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddw_u8 (uint16x8_t __a, uint8x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vaddwuv8qi ((int16x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddw_u16 (uint32x4_t __a, uint16x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vaddwuv4hi ((int32x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddw_u32 (uint64x2_t __a, uint32x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vaddwuv2si ((int64x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhadd_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vhaddsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhadd_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vhaddsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhadd_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vhaddsv2si (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhadd_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vhadduv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhadd_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vhadduv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhadd_u32 (uint32x2_t __a, uint32x2_t __b) +{ + 
return (uint32x2_t)__builtin_neon_vhadduv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhaddq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vhaddsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhaddq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vhaddsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhaddq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vhaddsv4si (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhaddq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vhadduv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhaddq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vhadduv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhaddq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vhadduv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhadd_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vrhaddsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhadd_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vrhaddsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhadd_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vrhaddsv2si (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhadd_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vrhadduv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhadd_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vrhadduv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhadd_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vrhadduv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhaddq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vrhaddsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhaddq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vrhaddsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhaddq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vrhaddsv4si (__a, __b); +} + +__extension__ 
extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vrhadduv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vrhadduv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vrhadduv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vqaddsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vqaddsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vqaddsv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_s64 (int64x1_t __a, int64x1_t __b) +{ + return (int64x1_t)__builtin_neon_vqaddsdi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vqadduv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vqadduv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vqadduv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqadd_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return (uint64x1_t)__builtin_neon_vqaddudi ((int64x1_t) __a, (int64x1_t) __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqaddq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vqaddsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqaddq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vqaddsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqaddq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vqaddsv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqaddq_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int64x2_t)__builtin_neon_vqaddsv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, 
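/* Illustration (not part of the header): vqadd saturates instead of
   wrapping, clamping each lane to the element type's range (255 for u8,
   [-128, 127] for s8, and so on), which is why 64-bit forms exist even
   though plain + already works on int64x1_t.  A small sketch:

     #include <arm_neon.h>

     // Brighten pixels without wrap-around: 250 + 20 -> 255, not 14.
     uint8x8_t brighten (uint8x8_t px)
     {
       return vqadd_u8 (px, vdup_n_u8 (20));
     }
*/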
__artificial__)) +vqaddq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vqadduv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqaddq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vqadduv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqaddq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vqadduv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqaddq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vqadduv2di ((int64x2_t) __a, (int64x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddhn_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddhn_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddhn_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddhn_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddhn_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddhn_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vraddhn_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int8x8_t)__builtin_neon_vraddhnv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vraddhn_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int16x4_t)__builtin_neon_vraddhnv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vraddhn_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int32x2_t)__builtin_neon_vraddhnv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vraddhn_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vraddhnv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vraddhn_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vraddhnv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, 
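/* Illustration (not part of the header): vaddhn adds two wide vectors
   and keeps only the high half of each lane of the sum, narrowing the
   element type (int16x8_t + int16x8_t -> int8x8_t); vraddhn rounds by
   adding 1 << (half-width - 1) before taking the high half.  Sketch:

     #include <arm_neon.h>

     // High bytes of a 16-bit lane-wise sum, with rounding.
     int8x8_t sum_high_bytes (int16x8_t a, int16x8_t b)
     {
       return vraddhn_s16 (a, b);   // per lane: (a + b + 0x80) >> 8
     }
*/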
__artificial__)) +vraddhn_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vraddhnv2di ((int64x2_t) __a, (int64x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_f32 (float32x2_t __a, float32x2_t __b) +{ +#ifdef __FAST_MATH__ + return __a * __b; +#else + return (float32x2_t) __builtin_neon_vmulfv2sf (__a, __b); +#endif + +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_f32 (float32x4_t __a, float32x4_t __b) +{ +#ifdef __FAST_MATH__ + return __a * __b; +#else + return (float32x4_t) __builtin_neon_vmulfv4sf (__a, __b); +#endif +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a * __b; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_p8 (poly8x8_t __a, poly8x8_t __b) +{ + return (poly8x8_t)__builtin_neon_vmulpv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_p8 (poly8x16_t __a, poly8x16_t __b) +{ + return (poly8x16_t)__builtin_neon_vmulpv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulh_s16 (int16x4_t __a, int16x4_t __b) +{ + return 
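/* Illustration (not part of the header): the integer vmul forms are
   plain C vector multiplies, but the float forms fall back to a NEON
   builtin unless __FAST_MATH__ is defined -- presumably because NEON
   single-precision arithmetic flushes denormals and is not strictly
   IEEE-conformant, so the header only lets GCC treat it as an ordinary
   multiply when fast-math already permits that (the same pattern
   recurs for vsub_f32/vsubq_f32 further down).  vmul_p8 is a carry-less
   polynomial (GF(2)) multiply, not integer multiplication.  Sketch:

     #include <arm_neon.h>

     float32x4_t scale4 (float32x4_t v, float32x4_t s)
     {
       return vmulq_f32 (v, s);   // one VMUL.F32 across four lanes
     }
*/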
(int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulh_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulhq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulhq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulh_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vqrdmulhv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulh_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vqrdmulhv2si (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vqrdmulhv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vqrdmulhv4si (__a, __b); +} + +#ifdef __ARM_FEATURE_QRDMX +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlah_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int16x4_t)__builtin_neon_vqrdmlahv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlah_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int32x2_t)__builtin_neon_vqrdmlahv2si (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlahq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c) +{ + return (int16x8_t)__builtin_neon_vqrdmlahv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlahq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c) +{ + return (int32x4_t)__builtin_neon_vqrdmlahv4si (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlsh_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int16x4_t)__builtin_neon_vqrdmlshv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlsh_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int32x2_t)__builtin_neon_vqrdmlshv2si (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlshq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c) +{ + return (int16x8_t)__builtin_neon_vqrdmlshv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlshq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c) +{ + return 
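/* Illustration (not part of the header): vqdmulh returns the high half
   of a saturating doubled product, which is exactly Q15/Q31 fixed-point
   multiplication; vqrdmulh adds rounding, and the vqrdmlah/vqrdmlsh
   pairs (guarded by __ARM_FEATURE_QRDMX, an ARMv8.1-A extension) fuse
   the accumulate step.  Sketch:

     #include <arm_neon.h>

     // Q15 multiply with rounding: saturate((2*a*b + 0x8000) >> 16),
     // e.g. 0.5 * 0.5 -> 0.25 in Q15.
     int16x4_t q15_mul (int16x4_t a, int16x4_t b)
     {
       return vqrdmulh_s16 (a, b);
     }
*/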
(int32x4_t)__builtin_neon_vqrdmlshv4si (__a, __b, __c); +} +#endif + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int16x8_t)__builtin_neon_vmullsv8qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int32x4_t)__builtin_neon_vmullsv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int64x2_t)__builtin_neon_vmullsv2si (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vmulluv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vmulluv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vmulluv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_p8 (poly8x8_t __a, poly8x8_t __b) +{ + return (poly16x8_t)__builtin_neon_vmullpv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmull_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int32x4_t)__builtin_neon_vqdmullv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmull_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int64x2_t)__builtin_neon_vqdmullv2si (__a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c) +{ + return (int8x8_t)__builtin_neon_vmlav8qi (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int16x4_t)__builtin_neon_vmlav4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int32x2_t)__builtin_neon_vmlav2si (__a, __b, __c); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c) +{ + return (float32x2_t)__builtin_neon_vmlav2sf (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) +{ + return (uint8x8_t)__builtin_neon_vmlav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) +{ + return 
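/* Illustration (not part of the header): vmull widens as it multiplies
   (u8 x u8 -> u16, s16 x s16 -> s32, ...), so no product can overflow;
   vmull_p8 is the polynomial variant and vqdmull the saturating
   doubling one used for Q15 -> Q31.  Sketch:

     #include <arm_neon.h>

     // Exact 16-bit products of two byte vectors.
     uint16x8_t widen_mul (uint8x8_t a, uint8x8_t b)
     {
       return vmull_u8 (a, b);
     }
*/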
(uint16x4_t)__builtin_neon_vmlav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) +{ + return (uint32x2_t)__builtin_neon_vmlav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c) +{ + return (int8x16_t)__builtin_neon_vmlav16qi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c) +{ + return (int16x8_t)__builtin_neon_vmlav8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c) +{ + return (int32x4_t)__builtin_neon_vmlav4si (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return (float32x4_t)__builtin_neon_vmlav4sf (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) +{ + return (uint8x16_t)__builtin_neon_vmlav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) +{ + return (uint16x8_t)__builtin_neon_vmlav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return (uint32x4_t)__builtin_neon_vmlav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c) +{ + return (int16x8_t)__builtin_neon_vmlalsv8qi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int32x4_t)__builtin_neon_vmlalsv4hi (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int64x2_t)__builtin_neon_vmlalsv2si (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) +{ + return (uint16x8_t)__builtin_neon_vmlaluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) +{ + return (uint32x4_t)__builtin_neon_vmlaluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_u32 (uint64x2_t 
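/* Illustration (not part of the header): vmla computes a + b * c
   lane-wise, and vmlal does the same while widening the product into
   the accumulator -- the core of dot-product style kernels.  Sketch:

     #include <arm_neon.h>

     // acc += a * b with 8-bit inputs and a 16-bit accumulator.
     uint16x8_t mac_u8 (uint16x8_t acc, uint8x8_t a, uint8x8_t b)
     {
       return vmlal_u8 (acc, a, b);
     }
*/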
__a, uint32x2_t __b, uint32x2_t __c) +{ + return (uint64x2_t)__builtin_neon_vmlaluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int32x4_t)__builtin_neon_vqdmlalv4hi (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int64x2_t)__builtin_neon_vqdmlalv2si (__a, __b, __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c) +{ + return (int8x8_t)__builtin_neon_vmlsv8qi (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int16x4_t)__builtin_neon_vmlsv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int32x2_t)__builtin_neon_vmlsv2si (__a, __b, __c); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c) +{ + return (float32x2_t)__builtin_neon_vmlsv2sf (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) +{ + return (uint8x8_t)__builtin_neon_vmlsv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) +{ + return (uint16x4_t)__builtin_neon_vmlsv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) +{ + return (uint32x2_t)__builtin_neon_vmlsv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c) +{ + return (int8x16_t)__builtin_neon_vmlsv16qi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c) +{ + return (int16x8_t)__builtin_neon_vmlsv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c) +{ + return (int32x4_t)__builtin_neon_vmlsv4si (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return (float32x4_t)__builtin_neon_vmlsv4sf (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) +{ + return 
(uint8x16_t)__builtin_neon_vmlsv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) +{ + return (uint16x8_t)__builtin_neon_vmlsv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return (uint32x4_t)__builtin_neon_vmlsv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c) +{ + return (int16x8_t)__builtin_neon_vmlslsv8qi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int32x4_t)__builtin_neon_vmlslsv4hi (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int64x2_t)__builtin_neon_vmlslsv2si (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) +{ + return (uint16x8_t)__builtin_neon_vmlsluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) +{ + return (uint32x4_t)__builtin_neon_vmlsluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) +{ + return (uint64x2_t)__builtin_neon_vmlsluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int32x4_t)__builtin_neon_vqdmlslv4hi (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int64x2_t)__builtin_neon_vqdmlslv2si (__a, __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=neon-vfpv4") +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vfma_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c) +{ + return (float32x2_t)__builtin_neon_vfmav2sf (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return (float32x4_t)__builtin_neon_vfmav4sf (__a, __b, __c); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vfms_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c) +{ + return (float32x2_t)__builtin_neon_vfmsv2sf (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, 
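/* Illustration (not part of the header): unlike vmla, vfma/vfms map to
   fused multiply-add, a + (b * c) with a single rounding, and the
   surrounding pragma temporarily retargets the FPU to neon-vfpv4
   because plain NEON (VFPv3) has no FMA instruction.  Sketch, assuming
   a VFPv4-capable -mfpu at the call site:

     #include <arm_neon.h>

     float32x2_t fma2 (float32x2_t a, float32x2_t b, float32x2_t c)
     {
       return vfma_f32 (a, b, c);   // VFMA.F32: a + b*c, one rounding
     }
*/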
__gnu_inline__, __artificial__)) +vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c) +{ + return (float32x4_t)__builtin_neon_vfmsv4sf (__a, __b, __c); +} +#pragma GCC pop_options + +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndn_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrintnv2sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndnq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrintnv4sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrnda_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrintav2sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndaq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrintav4sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndp_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrintpv2sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndpq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrintpv4sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndm_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrintmv2sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndmq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrintmv4sf (__a); +} + +#endif + +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndx_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrintxv2sf (__a); +} + +#endif + +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndxq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrintxv4sf (__a); +} + +#endif + +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrnd_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrintzv2sf (__a); +} + +#endif +#if __ARM_ARCH >= 8 +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrintzv4sf (__a); +} + +#endif + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a - __b; +} + +__extension__ extern 
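/* Illustration (not part of the header): the vrnd family is only
   available on ARMv8 (hence the __ARM_ARCH >= 8 guards) and selects
   the rounding mode by suffix: n = to nearest, ties to even; a = to
   nearest, ties away from zero; p = toward +inf; m = toward -inf;
   x = current mode; no suffix = toward zero.  Sketch:

     #include <arm_neon.h>
     #if __ARM_ARCH >= 8
     float32x4_t floor4 (float32x4_t v)
     {
       return vrndmq_f32 (v);   // lane-wise floor()
     }
     #endif
*/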
__inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_f32 (float32x2_t __a, float32x2_t __b) +{ +#ifdef __FAST_MATH__ + return __a - __b; +#else + return (float32x2_t) __builtin_neon_vsubv2sf (__a, __b); +#endif +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_s64 (int64x1_t __a, int64x1_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_s64 (int64x2_t __a, int64x2_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_f32 (float32x4_t __a, float32x4_t __b) +{ +#ifdef __FAST_MATH__ + return __a - __b; +#else + return (float32x4_t) __builtin_neon_vsubv4sf (__a, __b); +#endif +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return __a - __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubl_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int16x8_t)__builtin_neon_vsublsv8qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubl_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int32x4_t)__builtin_neon_vsublsv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubl_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int64x2_t)__builtin_neon_vsublsv2si (__a, __b); +} + 
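/* Illustration (not part of the header): vsubl widens both operands
   before subtracting, so byte differences land in 16-bit lanes without
   clipping; the vsubw forms just below instead subtract a narrow
   vector from an already-wide accumulator.  Sketch:

     #include <arm_neon.h>

     // Signed 16-bit differences of two byte vectors.
     int16x8_t diff16 (int8x8_t a, int8x8_t b)
     {
       return vsubl_s8 (a, b);
     }
*/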
+__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubl_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vsubluv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubl_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vsubluv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubl_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vsubluv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubw_s8 (int16x8_t __a, int8x8_t __b) +{ + return (int16x8_t)__builtin_neon_vsubwsv8qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubw_s16 (int32x4_t __a, int16x4_t __b) +{ + return (int32x4_t)__builtin_neon_vsubwsv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubw_s32 (int64x2_t __a, int32x2_t __b) +{ + return (int64x2_t)__builtin_neon_vsubwsv2si (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubw_u8 (uint16x8_t __a, uint8x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vsubwuv8qi ((int16x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubw_u16 (uint32x4_t __a, uint16x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vsubwuv4hi ((int32x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubw_u32 (uint64x2_t __a, uint32x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vsubwuv2si ((int64x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsub_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vhsubsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsub_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vhsubsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsub_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vhsubsv2si (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsub_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vhsubuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsub_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vhsubuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsub_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vhsubuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x16_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsubq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vhsubsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsubq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vhsubsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsubq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vhsubsv4si (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsubq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vhsubuv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsubq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vhsubuv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vhsubq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vhsubuv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vqsubsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vqsubsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vqsubsv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_s64 (int64x1_t __a, int64x1_t __b) +{ + return (int64x1_t)__builtin_neon_vqsubsdi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vqsubuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vqsubuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vqsubuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsub_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return (uint64x1_t)__builtin_neon_vqsubudi ((int64x1_t) __a, (int64x1_t) __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vqsubsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_s16 (int16x8_t __a, 
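/* Illustration (not part of the header): vqsub saturates on underflow
   and overflow, so for unsigned types it behaves as max(a - b, 0), a
   handy branch-free clamp.  Sketch:

     #include <arm_neon.h>

     // Per-lane absolute difference built from two saturating subs:
     // one operand is 0, the other is |a - b|.
     uint8x8_t absdiff (uint8x8_t a, uint8x8_t b)
     {
       return vorr_u8 (vqsub_u8 (a, b), vqsub_u8 (b, a));
     }
*/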
int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vqsubsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vqsubsv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int64x2_t)__builtin_neon_vqsubsv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vqsubuv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vqsubuv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vqsubuv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqsubq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vqsubuv2di ((int64x2_t) __a, (int64x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubhn_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubhn_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubhn_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubhn_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubhn_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubhn_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsubhn_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int8x8_t)__builtin_neon_vrsubhnv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsubhn_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int16x4_t)__builtin_neon_vrsubhnv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsubhn_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int32x2_t)__builtin_neon_vrsubhnv2di (__a, 
__b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vrsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vrsubhnv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vrsubhnv2di ((int64x2_t) __a, (int64x2_t) __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_s8 (int8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vceqv8qi (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_s16 (int16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vceqv4hi (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_s32 (int32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vceqv2si (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vceqv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vceqv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vceqv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_p8 (poly8x8_t __a, poly8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vceqv16qi (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vceqv8hi (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vceqv4si (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vceqv4sf (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, 
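/* Illustration (not part of the header): all the vc* comparisons
   return unsigned masks, all-ones where the predicate holds and zero
   elsewhere, sized like the inputs -- which is why float compares
   yield uint32 vectors.  The masks compose with the bitwise-select
   (vbsl) intrinsics defined later in this header.  Sketch:

     #include <arm_neon.h>

     // Branch-free per-lane max via compare + select.
     uint8x8_t max8 (uint8x8_t a, uint8x8_t b)
     {
       return vbsl_u8 (vcge_u8 (a, b), a, b);
     }
*/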
__artificial__)) +vceqq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vceqv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vceqv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_p8 (poly8x16_t __a, poly8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_s8 (int8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vcgev8qi (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_s16 (int16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgev4hi (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_s32 (int32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgev2si (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgev2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgev16qi (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgev8hi (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgev4si (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgev4sf (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgeuv16qi 
((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_s8 (int8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vcgev8qi (__b, __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_s16 (int16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgev4hi (__b, __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_s32 (int32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgev2si (__b, __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgev2sf (__b, __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __b, (int8x8_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __b, (int16x4_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __b, (int32x2_t) __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgev16qi (__b, __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgev8hi (__b, __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgev4si (__b, __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgev4sf (__b, __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __b, (int8x16_t) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __b, (int16x8_t) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __b, (int32x4_t) __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_s8 (int8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vcgtv8qi (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_s16 (int16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgtv4hi (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_s32 (int32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgtv2si (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgtv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgtv16qi (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgtv8hi (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgtv4si (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgtv4sf (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_s8 (int8x8_t __a, int8x8_t __b) +{ + return 
(uint8x8_t)__builtin_neon_vcgtv8qi (__b, __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_s16 (int16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgtv4hi (__b, __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_s32 (int32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgtv2si (__b, __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgtv2sf (__b, __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __b, (int8x8_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __b, (int16x4_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __b, (int32x2_t) __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgtv16qi (__b, __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgtv8hi (__b, __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgtv4si (__b, __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgtv4sf (__b, __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __b, (int8x16_t) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __b, (int16x8_t) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __b, (int32x4_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcage_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcagev2sf (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcageq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcagev4sf (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ 
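
/* vcage/vcagt compare absolute values (|a| >= |b|, |a| > |b|); as
   with vcle/vclt, the "less than" forms vcale/vcalt below are the
   same builtins with swapped operands.  Sketch:

     float32x2_t a = vdup_n_f32 (-4.0f), b = vdup_n_f32 (2.0f);
     uint32x2_t  m = vcage_f32 (a, b);   all-ones: |-4.0| >= |2.0|
*/
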
((__always_inline__, __gnu_inline__, __artificial__)) +vcale_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcagev2sf (__b, __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcaleq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcagev4sf (__b, __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcagt_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcagtv2sf (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcagtq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcagtv4sf (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcalt_f32 (float32x2_t __a, float32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vcagtv2sf (__b, __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcaltq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vcagtv4sf (__b, __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_s8 (int8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vtstv8qi (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_s16 (int16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vtstv4hi (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_s32 (int32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vtstv2si (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vtstv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vtstv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_p8 (poly8x8_t __a, poly8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_p16 (poly16x4_t __a, poly16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vtstv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vtstv16qi (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vtstv8hi 
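
/* vtst sets a lane to all-ones when (a & b) is non-zero there,
   i.e. a per-lane bit test.  Sketch:

     uint8x8_t flags = vdup_n_u8 (0x05), bit0 = vdup_n_u8 (0x01);
     uint8x8_t set   = vtst_u8 (flags, bit0);   all lanes 0xff
*/
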
(__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vtstv4si (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vtstv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vtstv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_p8 (poly8x16_t __a, poly8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtstq_p16 (poly16x8_t __a, poly16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vtstv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vabdsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vabdsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vabdsv2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vabdfv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vabduv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vabduv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vabduv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vabdsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vabdsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ 
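
/* vabd computes the lane-wise absolute difference |a - b| without
   wrap-around.  Sketch:

     uint8x8_t a = vdup_n_u8 (10), b = vdup_n_u8 (250);
     uint8x8_t d = vabd_u8 (a, b);   each lane 240
*/
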
((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vabdsv4si (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (float32x4_t)__builtin_neon_vabdfv4sf (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vabduv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vabduv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vabduv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdl_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int16x8_t)__builtin_neon_vabdlsv8qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdl_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int32x4_t)__builtin_neon_vabdlsv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdl_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int64x2_t)__builtin_neon_vabdlsv2si (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdl_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vabdluv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdl_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vabdluv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdl_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vabdluv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c) +{ + return (int8x8_t)__builtin_neon_vabasv8qi (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int16x4_t)__builtin_neon_vabasv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int32x2_t)__builtin_neon_vabasv2si (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) +{ + return (uint8x8_t)__builtin_neon_vabauv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c) +{ + return (uint16x4_t)__builtin_neon_vabauv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c) +{ + return (uint32x2_t)__builtin_neon_vabauv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c) +{ + return (int8x16_t)__builtin_neon_vabasv16qi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c) +{ + return (int16x8_t)__builtin_neon_vabasv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c) +{ + return (int32x4_t)__builtin_neon_vabasv4si (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c) +{ + return (uint8x16_t)__builtin_neon_vabauv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c) +{ + return (uint16x8_t)__builtin_neon_vabauv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c) +{ + return (uint32x4_t)__builtin_neon_vabauv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c) +{ + return (int16x8_t)__builtin_neon_vabalsv8qi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c) +{ + return (int32x4_t)__builtin_neon_vabalsv4hi (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c) +{ + return (int64x2_t)__builtin_neon_vabalsv2si (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c) +{ + return (uint16x8_t)__builtin_neon_vabaluv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c) +{ + return (uint32x4_t)__builtin_neon_vabaluv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c) +{ + return (uint64x2_t)__builtin_neon_vabaluv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) 
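
/* vaba/vabal accumulate absolute differences, the building block of
   sum-of-absolute-differences loops.  Sketch over two hypothetical
   8n-byte buffers p and q:

     uint16x8_t acc = vdupq_n_u16 (0);
     for (int i = 0; i < 8 * n; i += 8)
       acc = vabal_u8 (acc, vld1_u8 (p + i), vld1_u8 (q + i));
*/
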
__c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vmaxsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vmaxsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vmaxsv2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vmaxfv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vmaxuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vmaxuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vmaxuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vmaxsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vmaxsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vmaxsv4si (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (float32x4_t)__builtin_neon_vmaxfv4sf (__a, __b); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=neon-fp-armv8") +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxnm_f32 (float32x2_t a, float32x2_t b) +{ + return (float32x2_t)__builtin_neon_vmaxnmv2sf (a, b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxnmq_f32 (float32x4_t a, float32x4_t b) +{ + return (float32x4_t)__builtin_neon_vmaxnmv4sf (a, b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminnm_f32 (float32x2_t a, float32x2_t b) +{ + return (float32x2_t)__builtin_neon_vminnmv2sf (a, b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminnmq_f32 (float32x4_t a, float32x4_t b) +{ + return (float32x4_t)__builtin_neon_vminnmv4sf (a, b); +} +#pragma GCC pop_options + + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, 
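
/* The push_options/pop_options block above gates vmaxnm/vminnm on
   the ARMv8 FP extension (-mfpu=neon-fp-armv8); unlike vmax/vmin,
   these follow the IEEE 754-2008 maxNum/minNum rule of preferring
   the numeric operand when exactly one input is a quiet NaN.
   Sketch:

     float32x2_t a = vdup_n_f32 (__builtin_nanf ("")), b = vdup_n_f32 (1.0f);
     float32x2_t m = vmaxnm_f32 (a, b);   each lane 1.0f
*/
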
__artificial__)) +vmaxq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vmaxuv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vmaxuv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vmaxuv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vminsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vminsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vminsv2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vminfv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vminuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vminuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vminuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vminsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vminsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vminsv4si (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (float32x4_t)__builtin_neon_vminfv4sf (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vminuv16qi ((int8x16_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return 
(uint16x8_t)__builtin_neon_vminuv8hi ((int16x8_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vminuv4si ((int32x4_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vpaddv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vpaddv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vpaddv2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vpaddv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vpaddv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vpaddv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vpaddv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddl_s8 (int8x8_t __a) +{ + return (int16x4_t)__builtin_neon_vpaddlsv8qi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddl_s16 (int16x4_t __a) +{ + return (int32x2_t)__builtin_neon_vpaddlsv4hi (__a); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddl_s32 (int32x2_t __a) +{ + return (int64x1_t)__builtin_neon_vpaddlsv2si (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddl_u8 (uint8x8_t __a) +{ + return (uint16x4_t)__builtin_neon_vpaddluv8qi ((int8x8_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddl_u16 (uint16x4_t __a) +{ + return (uint32x2_t)__builtin_neon_vpaddluv4hi ((int16x4_t) __a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddl_u32 (uint32x2_t __a) +{ + return (uint64x1_t)__builtin_neon_vpaddluv2si ((int32x2_t) __a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddlq_s8 (int8x16_t __a) +{ + return (int16x8_t)__builtin_neon_vpaddlsv16qi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddlq_s16 (int16x8_t __a) +{ + return (int32x4_t)__builtin_neon_vpaddlsv8hi 
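
/* vpadd adds adjacent lane pairs; the vpaddl forms widen as they
   add, so chaining them gives an overflow-safe horizontal sum.
   Sketch:

     uint8x8_t  v   = vdup_n_u8 (1);
     uint16x4_t s16 = vpaddl_u8 (v);         four lanes of 2
     uint32x2_t s32 = vpaddl_u16 (s16);      two lanes of 4
     uint64x1_t s64 = vpaddl_u32 (s32);      one lane of 8
     uint64_t   sum = vget_lane_u64 (s64, 0);
*/
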
(__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddlq_s32 (int32x4_t __a) +{ + return (int64x2_t)__builtin_neon_vpaddlsv4si (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddlq_u8 (uint8x16_t __a) +{ + return (uint16x8_t)__builtin_neon_vpaddluv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddlq_u16 (uint16x8_t __a) +{ + return (uint32x4_t)__builtin_neon_vpaddluv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpaddlq_u32 (uint32x4_t __a) +{ + return (uint64x2_t)__builtin_neon_vpaddluv4si ((int32x4_t) __a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadal_s8 (int16x4_t __a, int8x8_t __b) +{ + return (int16x4_t)__builtin_neon_vpadalsv8qi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadal_s16 (int32x2_t __a, int16x4_t __b) +{ + return (int32x2_t)__builtin_neon_vpadalsv4hi (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadal_s32 (int64x1_t __a, int32x2_t __b) +{ + return (int64x1_t)__builtin_neon_vpadalsv2si (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadal_u8 (uint16x4_t __a, uint8x8_t __b) +{ + return (uint16x4_t)__builtin_neon_vpadaluv8qi ((int16x4_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadal_u16 (uint32x2_t __a, uint16x4_t __b) +{ + return (uint32x2_t)__builtin_neon_vpadaluv4hi ((int32x2_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadal_u32 (uint64x1_t __a, uint32x2_t __b) +{ + return (uint64x1_t)__builtin_neon_vpadaluv2si ((int64x1_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadalq_s8 (int16x8_t __a, int8x16_t __b) +{ + return (int16x8_t)__builtin_neon_vpadalsv16qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadalq_s16 (int32x4_t __a, int16x8_t __b) +{ + return (int32x4_t)__builtin_neon_vpadalsv8hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadalq_s32 (int64x2_t __a, int32x4_t __b) +{ + return (int64x2_t)__builtin_neon_vpadalsv4si (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadalq_u8 (uint16x8_t __a, uint8x16_t __b) +{ + return (uint16x8_t)__builtin_neon_vpadaluv16qi ((int16x8_t) __a, (int8x16_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadalq_u16 (uint32x4_t __a, uint16x8_t __b) +{ + return (uint32x4_t)__builtin_neon_vpadaluv8hi ((int32x4_t) __a, (int16x8_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadalq_u32 
(uint64x2_t __a, uint32x4_t __b) +{ + return (uint64x2_t)__builtin_neon_vpadaluv4si ((int64x2_t) __a, (int32x4_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vpmaxsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vpmaxsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vpmaxsv2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vpmaxfv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vpmaxuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vpmaxuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vpmaxuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vpminsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vpminsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vpminsv2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vpminfv2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vpminuv8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vpminuv4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vpminuv2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecps_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vrecpsv2sf (__a, 
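
/* vpmax/vpmin reduce adjacent pairs, so applying one to a vector
   and itself log2(lanes) times yields a horizontal maximum or
   minimum.  Sketch (buf is a hypothetical 8-byte array):

     uint8x8_t v = vld1_u8 (buf);
     v = vpmax_u8 (v, v);
     v = vpmax_u8 (v, v);
     v = vpmax_u8 (v, v);
     uint8_t max = vget_lane_u8 (v, 0);
*/
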
__b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpsq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (float32x4_t)__builtin_neon_vrecpsv4sf (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrts_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x2_t)__builtin_neon_vrsqrtsv2sf (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b) +{ + return (float32x4_t)__builtin_neon_vrsqrtsv4sf (__a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vshlsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vshlsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vshlsv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_s64 (int64x1_t __a, int64x1_t __b) +{ + return (int64x1_t)__builtin_neon_vshlsdi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_u8 (uint8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vshluv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_u16 (uint16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vshluv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_u32 (uint32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vshluv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_u64 (uint64x1_t __a, int64x1_t __b) +{ + return (uint64x1_t)__builtin_neon_vshludi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vshlsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vshlsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vshlsv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int64x2_t)__builtin_neon_vshlsv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_u8 (uint8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vshluv16qi ((int8x16_t) 
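
/* vrecps/vrsqrts are the Newton-Raphson step functions that refine
   the vrecpe/vrsqrte estimates; each multiply-by-step roughly
   doubles the number of correct bits.  Sketch for 1/d:

     float32x2_t d = vdup_n_f32 (3.0f);
     float32x2_t e = vrecpe_f32 (d);           rough estimate
     e = vmul_f32 (e, vrecps_f32 (d, e));      refined
     e = vmul_f32 (e, vrecps_f32 (d, e));      near full precision
*/
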
__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_u16 (uint16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vshluv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_u32 (uint32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vshluv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_u64 (uint64x2_t __a, int64x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vshluv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vrshlsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vrshlsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vrshlsv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_s64 (int64x1_t __a, int64x1_t __b) +{ + return (int64x1_t)__builtin_neon_vrshlsdi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_u8 (uint8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vrshluv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_u16 (uint16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vrshluv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_u32 (uint32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vrshluv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshl_u64 (uint64x1_t __a, int64x1_t __b) +{ + return (uint64x1_t)__builtin_neon_vrshludi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vrshlsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vrshlsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vrshlsv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int64x2_t)__builtin_neon_vrshlsv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_u8 (uint8x16_t __a, int8x16_t __b) +{ + return 
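
/* vshl shifts each lane left by the signed per-lane count in the
   second operand; negative counts shift right, and the vrshl forms
   round those right shifts instead of truncating.  Sketch:

     uint16x4_t a  = vdup_n_u16 (100);
     int16x4_t  sh = vdup_n_s16 (-3);
     uint16x4_t t  = vshl_u16 (a, sh);    100 >> 3 = 12
     uint16x4_t r  = vrshl_u16 (a, sh);   rounded: 13
*/
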
(uint8x16_t)__builtin_neon_vrshluv16qi ((int8x16_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_u16 (uint16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vrshluv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_u32 (uint32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vrshluv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshlq_u64 (uint64x2_t __a, int64x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vrshluv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vqshlsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vqshlsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vqshlsv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_s64 (int64x1_t __a, int64x1_t __b) +{ + return (int64x1_t)__builtin_neon_vqshlsdi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_u8 (uint8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vqshluv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_u16 (uint16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vqshluv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_u32 (uint32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vqshluv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_u64 (uint64x1_t __a, int64x1_t __b) +{ + return (uint64x1_t)__builtin_neon_vqshludi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vqshlsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vqshlsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vqshlsv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int64x2_t)__builtin_neon_vqshlsv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vqshluv16qi ((int8x16_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_u16 (uint16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vqshluv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_u32 (uint32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vqshluv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_u64 (uint64x2_t __a, int64x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vqshluv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vqrshlsv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x4_t)__builtin_neon_vqrshlsv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x2_t)__builtin_neon_vqrshlsv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_s64 (int64x1_t __a, int64x1_t __b) +{ + return (int64x1_t)__builtin_neon_vqrshlsdi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_u8 (uint8x8_t __a, int8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vqrshluv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_u16 (uint16x4_t __a, int16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vqrshluv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_u32 (uint32x2_t __a, int32x2_t __b) +{ + return (uint32x2_t)__builtin_neon_vqrshluv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshl_u64 (uint64x1_t __a, int64x1_t __b) +{ + return (uint64x1_t)__builtin_neon_vqrshludi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_s8 (int8x16_t __a, int8x16_t __b) +{ + return (int8x16_t)__builtin_neon_vqrshlsv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_s16 (int16x8_t __a, int16x8_t __b) +{ + return (int16x8_t)__builtin_neon_vqrshlsv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_s32 (int32x4_t __a, int32x4_t __b) +{ + return (int32x4_t)__builtin_neon_vqrshlsv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_s64 (int64x2_t __a, int64x2_t __b) +{ + return (int64x2_t)__builtin_neon_vqrshlsv2di (__a, __b); +} + +__extension__ extern __inline 
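
/* The vqshl/vqrshl forms saturate instead of wrapping when a
   shifted value no longer fits its lane.  Sketch:

     int8x8_t v = vdup_n_s8 (100);
     int8x8_t s = vqshl_s8 (v, vdup_n_s8 (2));   400 saturates to 127
*/
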
uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_u8 (uint8x16_t __a, int8x16_t __b) +{ + return (uint8x16_t)__builtin_neon_vqrshluv16qi ((int8x16_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_u16 (uint16x8_t __a, int16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vqrshluv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_u32 (uint32x4_t __a, int32x4_t __b) +{ + return (uint32x4_t)__builtin_neon_vqrshluv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshlq_u64 (uint64x2_t __a, int64x2_t __b) +{ + return (uint64x2_t)__builtin_neon_vqrshluv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_s8 (int8x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vshrs_nv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_s16 (int16x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vshrs_nv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_s32 (int32x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vshrs_nv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_s64 (int64x1_t __a, const int __b) +{ + return (int64x1_t)__builtin_neon_vshrs_ndi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_u8 (uint8x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vshru_nv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_u16 (uint16x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vshru_nv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_u32 (uint32x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vshru_nv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshr_n_u64 (uint64x1_t __a, const int __b) +{ + return (uint64x1_t)__builtin_neon_vshru_ndi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_s8 (int8x16_t __a, const int __b) +{ + return (int8x16_t)__builtin_neon_vshrs_nv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_s16 (int16x8_t __a, const int __b) +{ + return (int16x8_t)__builtin_neon_vshrs_nv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_s32 (int32x4_t __a, const int __b) +{ + return (int32x4_t)__builtin_neon_vshrs_nv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_s64 (int64x2_t __a, const int __b) +{ + return 
(int64x2_t)__builtin_neon_vshrs_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_u8 (uint8x16_t __a, const int __b) +{ + return (uint8x16_t)__builtin_neon_vshru_nv16qi ((int8x16_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vshru_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vshru_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrq_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint64x2_t)__builtin_neon_vshru_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_s8 (int8x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vrshrs_nv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_s16 (int16x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vrshrs_nv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_s32 (int32x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vrshrs_nv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_s64 (int64x1_t __a, const int __b) +{ + return (int64x1_t)__builtin_neon_vrshrs_ndi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_u8 (uint8x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vrshru_nv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_u16 (uint16x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vrshru_nv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_u32 (uint32x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vrshru_nv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshr_n_u64 (uint64x1_t __a, const int __b) +{ + return (uint64x1_t)__builtin_neon_vrshru_ndi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_s8 (int8x16_t __a, const int __b) +{ + return (int8x16_t)__builtin_neon_vrshrs_nv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_s16 (int16x8_t __a, const int __b) +{ + return (int16x8_t)__builtin_neon_vrshrs_nv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_s32 (int32x4_t __a, const int __b) +{ + return (int32x4_t)__builtin_neon_vrshrs_nv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ 
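
/* The _n shift variants take an immediate count: despite the plain
   "const int __b" prototype, __b must be an integer constant
   expression in range (1 .. lane bits for right shifts) or the
   builtin is rejected at compile time.  Sketch:

     uint32x2_t v = vdup_n_u32 (0x80000000u);
     uint32x2_t t = vshr_n_u32 (v, 31);   each lane 1
*/
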
((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_s64 (int64x2_t __a, const int __b) +{ + return (int64x2_t)__builtin_neon_vrshrs_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_u8 (uint8x16_t __a, const int __b) +{ + return (uint8x16_t)__builtin_neon_vrshru_nv16qi ((int8x16_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vrshru_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vrshru_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrq_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint64x2_t)__builtin_neon_vrshru_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_s16 (int16x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_s32 (int32x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_s64 (int64x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrn_n_s16 (int16x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vrshrn_nv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrn_n_s32 (int32x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vrshrn_nv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrn_n_s64 (int64x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vrshrn_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrn_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vrshrn_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrn_n_u32 (uint32x4_t __a, const int __b) +{ + 
return (uint16x4_t)__builtin_neon_vrshrn_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrshrn_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vrshrn_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrn_n_s16 (int16x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vqshrns_nv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrn_n_s32 (int32x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vqshrns_nv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrn_n_s64 (int64x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vqshrns_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrn_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vqshrnu_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrn_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vqshrnu_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrn_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vqshrnu_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrn_n_s16 (int16x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vqrshrns_nv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrn_n_s32 (int32x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vqrshrns_nv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrn_n_s64 (int64x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vqrshrns_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrn_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vqrshrnu_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrn_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vqrshrnu_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrn_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vqrshrnu_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrun_n_s16 (int16x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrun_n_s32 (int32x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b); +} + 
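This stretch of the header defines the narrowing right shifts (vshrn/vrshrn, the saturating vqshrn/vqrshrn, and the signed-to-unsigned vqshrun forms, whose remaining variants continue just below). A minimal usage sketch, not part of the imported header; the helper name and the Q8 scale are illustrative assumptions:

#include <arm_neon.h>

/* Hypothetical helper: scale eight pixels by a Q8 fixed-point gain.
   vmull_u8 widens the products to 16 bits; vqrshrn_n_u16 then shifts
   right by the 8 fractional bits with rounding and saturates the
   result back down into a uint8x8_t. */
static inline uint8x8_t
scale_pixels_q8 (uint8x8_t px, uint8x8_t gain_q8)
{
  uint16x8_t wide = vmull_u8 (px, gain_q8);
  return vqrshrn_n_u16 (wide, 8);
}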
+__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshrun_n_s64 (int64x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrun_n_s16 (int16x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vqrshrun_nv8hi (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrun_n_s32 (int32x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vqrshrun_nv4si (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrshrun_n_s64 (int64x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vqrshrun_nv2di (__a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_s8 (int8x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vshl_nv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_s16 (int16x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vshl_nv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_s32 (int32x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vshl_nv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_s64 (int64x1_t __a, const int __b) +{ + return (int64x1_t)__builtin_neon_vshl_ndi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_u8 (uint8x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vshl_nv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_u16 (uint16x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vshl_nv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_u32 (uint32x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vshl_nv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshl_n_u64 (uint64x1_t __a, const int __b) +{ + return (uint64x1_t)__builtin_neon_vshl_ndi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_s8 (int8x16_t __a, const int __b) +{ + return (int8x16_t)__builtin_neon_vshl_nv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_s16 (int16x8_t __a, const int __b) +{ + return (int16x8_t)__builtin_neon_vshl_nv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_s32 (int32x4_t __a, const int __b) +{ + return (int32x4_t)__builtin_neon_vshl_nv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_s64 (int64x2_t __a, const int __b) +{ + return 
(int64x2_t)__builtin_neon_vshl_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_u8 (uint8x16_t __a, const int __b) +{ + return (uint8x16_t)__builtin_neon_vshl_nv16qi ((int8x16_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vshl_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vshl_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshlq_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint64x2_t)__builtin_neon_vshl_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_s8 (int8x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vqshl_s_nv8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_s16 (int16x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vqshl_s_nv4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_s32 (int32x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vqshl_s_nv2si (__a, __b); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_s64 (int64x1_t __a, const int __b) +{ + return (int64x1_t)__builtin_neon_vqshl_s_ndi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_u8 (uint8x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vqshl_u_nv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_u16 (uint16x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vqshl_u_nv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_u32 (uint32x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vqshl_u_nv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshl_n_u64 (uint64x1_t __a, const int __b) +{ + return (uint64x1_t)__builtin_neon_vqshl_u_ndi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_s8 (int8x16_t __a, const int __b) +{ + return (int8x16_t)__builtin_neon_vqshl_s_nv16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_s16 (int16x8_t __a, const int __b) +{ + return (int16x8_t)__builtin_neon_vqshl_s_nv8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_s32 (int32x4_t __a, const int __b) +{ + return (int32x4_t)__builtin_neon_vqshl_s_nv4si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_s64 (int64x2_t __a, const int __b) +{ + return (int64x2_t)__builtin_neon_vqshl_s_nv2di (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_u8 (uint8x16_t __a, const int __b) +{ + return (uint8x16_t)__builtin_neon_vqshl_u_nv16qi ((int8x16_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vqshl_u_nv8hi ((int16x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vqshl_u_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlq_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint64x2_t)__builtin_neon_vqshl_u_nv2di ((int64x2_t) __a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlu_n_s8 (int8x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vqshlu_nv8qi (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlu_n_s16 (int16x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vqshlu_nv4hi (__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlu_n_s32 (int32x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vqshlu_nv2si (__a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshlu_n_s64 (int64x1_t __a, const int __b) +{ + return (uint64x1_t)__builtin_neon_vqshlu_ndi (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshluq_n_s8 (int8x16_t __a, const int __b) +{ + return (uint8x16_t)__builtin_neon_vqshlu_nv16qi (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshluq_n_s16 (int16x8_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vqshlu_nv8hi (__a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshluq_n_s32 (int32x4_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vqshlu_nv4si (__a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqshluq_n_s64 (int64x2_t __a, const int __b) +{ + return (uint64x2_t)__builtin_neon_vqshlu_nv2di (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshll_n_s8 (int8x8_t __a, const int __b) +{ + return (int16x8_t)__builtin_neon_vshlls_nv8qi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshll_n_s16 (int16x4_t __a, const int __b) +{ + return (int32x4_t)__builtin_neon_vshlls_nv4hi (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshll_n_s32 (int32x2_t __a, const int __b) +{ + return 
(int64x2_t)__builtin_neon_vshlls_nv2si (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshll_n_u8 (uint8x8_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vshllu_nv8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshll_n_u16 (uint16x4_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vshllu_nv4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshll_n_u32 (uint32x2_t __a, const int __b) +{ + return (uint64x2_t)__builtin_neon_vshllu_nv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) +{ + return (int8x8_t)__builtin_neon_vsras_nv8qi (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vsras_nv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vsras_nv2si (__a, __b, __c); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) +{ + return (int64x1_t)__builtin_neon_vsras_ndi (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) +{ + return (uint8x8_t)__builtin_neon_vsrau_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return (uint16x4_t)__builtin_neon_vsrau_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return (uint32x2_t)__builtin_neon_vsrau_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) +{ + return (uint64x1_t)__builtin_neon_vsrau_ndi ((int64x1_t) __a, (int64x1_t) __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) +{ + return (int8x16_t)__builtin_neon_vsras_nv16qi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vsras_nv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vsras_nv4si (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vsras_nv2di (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) +{ + return (uint8x16_t)__builtin_neon_vsrau_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) +{ + return (uint16x8_t)__builtin_neon_vsrau_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vsrau_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) +{ + return (uint64x2_t)__builtin_neon_vsrau_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) +{ + return (int8x8_t)__builtin_neon_vrsras_nv8qi (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vrsras_nv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vrsras_nv2si (__a, __b, __c); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) +{ + return (int64x1_t)__builtin_neon_vrsras_ndi (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) +{ + return (uint8x8_t)__builtin_neon_vrsrau_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return (uint16x4_t)__builtin_neon_vrsrau_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return (uint32x2_t)__builtin_neon_vrsrau_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) +{ + return (uint64x1_t)__builtin_neon_vrsrau_ndi ((int64x1_t) __a, (int64x1_t) __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) +{ + return (int8x16_t)__builtin_neon_vrsras_nv16qi (__a, __b, __c); +} + 
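The vsra_n/vrsra_n intrinsics above fuse a right shift of the second operand with an accumulation into the first, i.e. __a + (__b >> __c), with or without rounding; the vrsraq_n variants continuing below are the same operation on q registers. A small sketch under the same caveat (illustrative helper, not from the header):

#include <arm_neon.h>

/* Count negative lanes: reinterpreted as unsigned, a negative int8 has
   its top bit set, so an unsigned shift right by 7 yields exactly 1,
   which vsra_n_u8 adds straight into the per-lane counters.  The
   counters wrap rather than saturate, so keep iteration counts
   below 256. */
static inline uint8x8_t
add_sign_bits (uint8x8_t counters, int8x8_t samples)
{
  return vsra_n_u8 (counters, vreinterpret_u8_s8 (samples), 7);
}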
+__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vrsras_nv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vrsras_nv4si (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vrsras_nv2di (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) +{ + return (uint8x16_t)__builtin_neon_vrsrau_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) +{ + return (uint16x8_t)__builtin_neon_vrsrau_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vrsrau_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) +{ + return (uint64x2_t)__builtin_neon_vrsrau_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c) +{ + return (poly64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) +{ + return (int8x8_t)__builtin_neon_vsri_nv8qi (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vsri_nv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vsri_nv2si (__a, __b, __c); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) +{ + return (int64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) +{ + return (uint8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return 
(uint16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return (uint32x2_t)__builtin_neon_vsri_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) +{ + return (uint64x1_t)__builtin_neon_vsri_ndi ((int64x1_t) __a, (int64x1_t) __b, __c); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c) +{ + return (poly8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c) +{ + return (poly16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c) +{ + return (poly64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) +{ + return (int8x16_t)__builtin_neon_vsri_nv16qi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vsri_nv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vsri_nv4si (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vsri_nv2di (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) +{ + return (uint8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) +{ + return (uint16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vsri_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) +{ + return (uint64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c); +} + 
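vsri_n keeps the top __c bits of its first operand and inserts the second operand shifted right by __c underneath; vsli_n, defined further below, is the mirror image for left shifts. One classic use, sketched here with an illustrative helper name, is replicating the high bits of a bit-packed colour channel to expand it to full byte range:

#include <arm_neon.h>

/* Expand a 5-bit channel already aligned to bits 7..3 of each byte to
   full 8-bit range: the result is (x & 0xF8) | (x >> 5), replicating
   the three high bits into the vacated low bits.  vsri_n_u8 preserves
   the top 5 bits of its first operand and inserts x >> 5 below them. */
static inline uint8x8_t
expand5to8 (uint8x8_t x)
{
  return vsri_n_u8 (x, x, 5);
}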
+__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c) +{ + return (poly8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c) +{ + return (poly16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c) +{ + return (poly64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c) +{ + return (int8x8_t)__builtin_neon_vsli_nv8qi (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vsli_nv4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vsli_nv2si (__a, __b, __c); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c) +{ + return (int64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c) +{ + return (uint8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return (uint16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return (uint32x2_t)__builtin_neon_vsli_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c) +{ + return (uint64x1_t)__builtin_neon_vsli_ndi ((int64x1_t) __a, (int64x1_t) __b, __c); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c) +{ + return (poly8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c) +{ + return (poly16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c) +{ + return (poly64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c) +{ + return (int8x16_t)__builtin_neon_vsli_nv16qi (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vsli_nv8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vsli_nv4si (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vsli_nv2di (__a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c) +{ + return (uint8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c) +{ + return (uint16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vsli_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c) +{ + return (uint64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c) +{ + return (poly8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsliq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c) +{ + return (poly16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabs_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vabsv8qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabs_s16 (int16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vabsv4hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabs_s32 (int32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vabsv2si (__a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabs_f32 (float32x2_t __a) +{ + return 
(float32x2_t)__builtin_neon_vabsv2sf (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabsq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vabsv16qi (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabsq_s16 (int16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vabsv8hi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabsq_s32 (int32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vabsv4si (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabsq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vabsv4sf (__a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqabs_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vqabsv8qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqabs_s16 (int16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vqabsv4hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqabs_s32 (int32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vqabsv2si (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqabsq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vqabsv16qi (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqabsq_s16 (int16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vqabsv8hi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqabsq_s32 (int32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vqabsv4si (__a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vneg_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vnegv8qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vneg_s16 (int16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vnegv4hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vneg_s32 (int32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vnegv2si (__a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vneg_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vnegv2sf (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vnegq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vnegv16qi (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vnegq_s16 (int16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vnegv8hi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vnegq_s32 (int32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vnegv4si (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vnegq_f32 (float32x4_t __a) +{ + return 
(float32x4_t)__builtin_neon_vnegv4sf (__a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqneg_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vqnegv8qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqneg_s16 (int16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vqnegv4hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqneg_s32 (int32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vqnegv2si (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqnegq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vqnegv16qi (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqnegq_s16 (int16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vqnegv8hi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqnegq_s32 (int32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vqnegv4si (__a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvn_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vmvnv8qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvn_s16 (int16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vmvnv4hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvn_s32 (int32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vmvnv2si (__a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvn_u8 (uint8x8_t __a) +{ + return (uint8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvn_u16 (uint16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vmvnv4hi ((int16x4_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvn_u32 (uint32x2_t __a) +{ + return (uint32x2_t)__builtin_neon_vmvnv2si ((int32x2_t) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvn_p8 (poly8x8_t __a) +{ + return (poly8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvnq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vmvnv16qi (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvnq_s16 (int16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vmvnv8hi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvnq_s32 (int32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vmvnv4si (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvnq_u8 (uint8x16_t __a) +{ + return (uint8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vmvnq_u16 (uint16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vmvnv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvnq_u32 (uint32x4_t __a) +{ + return (uint32x4_t)__builtin_neon_vmvnv4si ((int32x4_t) __a); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmvnq_p8 (poly8x16_t __a) +{ + return (poly8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcls_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vclsv8qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcls_s16 (int16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vclsv4hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcls_s32 (int32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vclsv2si (__a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclsq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vclsv16qi (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclsq_s16 (int16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vclsv8hi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclsq_s32 (int32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vclsv4si (__a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclz_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vclzv8qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclz_s16 (int16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vclzv4hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclz_s32 (int32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vclzv2si (__a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclz_u8 (uint8x8_t __a) +{ + return (uint8x8_t)__builtin_neon_vclzv8qi ((int8x8_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclz_u16 (uint16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vclzv4hi ((int16x4_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclz_u32 (uint32x2_t __a) +{ + return (uint32x2_t)__builtin_neon_vclzv2si ((int32x2_t) __a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclzq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vclzv16qi (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclzq_s16 (int16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vclzv8hi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclzq_s32 (int32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vclzv4si (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vclzq_u8 (uint8x16_t __a) +{ + return (uint8x16_t)__builtin_neon_vclzv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclzq_u16 (uint16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vclzv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclzq_u32 (uint32x4_t __a) +{ + return (uint32x4_t)__builtin_neon_vclzv4si ((int32x4_t) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcnt_s8 (int8x8_t __a) +{ + return (int8x8_t)__builtin_neon_vcntv8qi (__a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcnt_u8 (uint8x8_t __a) +{ + return (uint8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcnt_p8 (poly8x8_t __a) +{ + return (poly8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcntq_s8 (int8x16_t __a) +{ + return (int8x16_t)__builtin_neon_vcntv16qi (__a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcntq_u8 (uint8x16_t __a) +{ + return (uint8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcntq_p8 (poly8x16_t __a) +{ + return (poly8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpe_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrecpev2sf (__a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpe_u32 (uint32x2_t __a) +{ + return (uint32x2_t)__builtin_neon_vrecpev2si ((int32x2_t) __a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpeq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrecpev4sf (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpeq_u32 (uint32x4_t __a) +{ + return (uint32x4_t)__builtin_neon_vrecpev4si ((int32x4_t) __a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrte_f32 (float32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vrsqrtev2sf (__a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrte_u32 (uint32x2_t __a) +{ + return (uint32x2_t)__builtin_neon_vrsqrtev2si ((int32x2_t) __a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrteq_f32 (float32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vrsqrtev4sf (__a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrteq_u32 (uint32x4_t __a) +{ + return (uint32x4_t)__builtin_neon_vrsqrtev4si ((int32x4_t) __a); +} + +__extension__ extern __inline int8_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+  return (int8_t)__builtin_neon_vget_lanev8qi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+  return (int16_t)__builtin_neon_vget_lanev4hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+  return (int32_t)__builtin_neon_vget_lanev2si (__a, __b);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+/* Functions cannot accept or return __FP16 types.  Even if the function
+   were marked always-inline so there were no call sites, the declaration
+   would nonetheless raise an error.  Hence, we must use a macro instead.  */
+
+  /* For big-endian, GCC's vector indices are reversed within each 64
+     bits compared to the architectural lane indices used by Neon
+     intrinsics.  */
+#ifdef __ARM_BIG_ENDIAN
+#define __ARM_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
+#define __arm_lane(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec) - 1))
+#define __arm_laneq(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec)/2 - 1))
+#else
+#define __arm_lane(__vec, __idx) __idx
+#define __arm_laneq(__vec, __idx) __idx
+#endif
+
+#define vget_lane_f16(__v, __idx)                       \
+  __extension__                                         \
+  ({                                                    \
+    float16x4_t __vec = (__v);                          \
+    __builtin_arm_lane_check (4, __idx);                \
+    float16_t __res = __vec[__arm_lane(__vec, __idx)];  \
+    __res;                                              \
+  })
+#endif
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+  return (float32_t)__builtin_neon_vget_lanev2sf (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+  return (uint8_t)__builtin_neon_vget_laneuv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+  return (uint16_t)__builtin_neon_vget_laneuv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+  return (uint32_t)__builtin_neon_vget_laneuv2si ((int32x2_t) __a, __b);
+}
+
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+  return (poly8_t)__builtin_neon_vget_laneuv8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+  return (poly16_t)__builtin_neon_vget_laneuv4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+  return (int64_t)__builtin_neon_vget_lanedi (__a, __b);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p64 (poly64x1_t __a, const int __b)
+{
+  return (poly64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+  return (uint64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b);
+}
+
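With the d-register vget_lane_* family complete, a short sketch of lane reads (hypothetical helper, not part of the header). The lane index must be a compile-time constant, and on big-endian targets the __arm_lane macro above is what maps these architectural lane numbers onto GCC's reversed vector indices:

#include <arm_neon.h>

/* Horizontal add of a two-lane vector via single-lane extracts. */
static inline int32_t
sum_two_lanes (int32x2_t v)
{
  return vget_lane_s32 (v, 0) + vget_lane_s32 (v, 1);
}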
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+  return (int8_t)__builtin_neon_vget_lanev16qi (__a, __b);
+}
+
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+  return (int16_t)__builtin_neon_vget_lanev8hi (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+  return (int32_t)__builtin_neon_vget_lanev4si (__a, __b);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define vgetq_lane_f16(__v, __idx)                      \
+  __extension__                                         \
+  ({                                                    \
+    float16x8_t __vec = (__v);                          \
+    __builtin_arm_lane_check (8, __idx);                \
+    float16_t __res = __vec[__arm_laneq(__vec, __idx)]; \
+    __res;                                              \
+  })
+#endif
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+  return (float32_t)__builtin_neon_vget_lanev4sf (__a, __b);
+}
+
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+  return (uint8_t)__builtin_neon_vget_laneuv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint16_t)__builtin_neon_vget_laneuv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint32_t)__builtin_neon_vget_laneuv4si ((int32x4_t) __a, __b);
+}
+
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+  return (poly8_t)__builtin_neon_vget_laneuv16qi ((int8x16_t) __a, __b);
+}
+
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+  return (poly16_t)__builtin_neon_vget_laneuv8hi ((int16x8_t) __a, __b);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+  return (int64_t)__builtin_neon_vget_lanev2di (__a, __b);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p64 (poly64x2_t __a, const int __b)
+{
+  return (poly64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s8 (int8_t __a, int8x8_t __b, const int __c)
+{
+  return (int8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
+{
+  return (int16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
+{
+  return (int32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, __b, __c);
+}
+
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define vset_lane_f16(__e, __v, __idx)                  \
+  __extension__                                         \
+  ({                                                    \
+    float16_t __elem = (__e);                           \
+    float16x4_t __vec = (__v);                          \
+    __builtin_arm_lane_check (4, __idx);                \
+    __vec[__arm_lane (__vec, __idx)] = __elem;          \
+    __vec;                                              \
+  })
+#endif
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_f32 (float32_t __a, float32x2_t __b, const int __c)
+{
+  return (float32x2_t)__builtin_neon_vset_lanev2sf ((__builtin_neon_sf) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u8 (uint8_t __a, uint8x8_t __b, const int __c)
+{
+  return (uint8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u16 (uint16_t __a, uint16x4_t __b, const int __c)
+{
+  return (uint16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u32 (uint32_t __a, uint32x2_t __b, const int __c)
+{
+  return (uint32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p8 (poly8_t __a, poly8x8_t __b, const int __c)
+{
+  return (poly8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p16 (poly16_t __a, poly16x4_t __b, const int __c)
+{
+  return (poly16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_s64 (int64_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_u64 (uint64_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, (int64x1_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p64 (poly64_t __a, poly64x1_t __b, const int __c)
+{
+  return (poly64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, (int64x1_t) __b, __c);
+}
+
+#pragma GCC pop_options
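The vset_lane_* forms above are the write counterpart: they return a copy of the vector with one lane replaced and leave the operand itself untouched. A minimal sketch (hypothetical helper; the checksum value is assumed to come from elsewhere):

#include <arm_neon.h>

/* Overwrite lane 3 of a four-lane vector with a scalar checksum. */
static inline uint16x4_t
patch_lane3 (uint16x4_t v, uint16_t csum)
{
  return vset_lane_u16 (csum, v, 3);
}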
extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __c) +{ + return (int8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, __b, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +#define vsetq_lane_f16(__e, __v, __idx) \ + __extension__ \ + ({ \ + float16_t __elem = (__e); \ + float16x8_t __vec = (__v); \ + __builtin_arm_lane_check (8, __idx); \ + __vec[__arm_laneq (__vec, __idx)] = __elem; \ + __vec; \ + }) +#endif + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __c) +{ + return (float32x4_t)__builtin_neon_vset_lanev4sf ((__builtin_neon_sf) __a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __c) +{ + return (uint8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __c) +{ + return (uint16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, (int32x4_t) __b, __c); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_p8 (poly8_t __a, poly8x16_t __b, const int __c) +{ + return (poly8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_p16 (poly16_t __a, poly16x8_t __b, const int __c) +{ + return (poly16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, __b, __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __c) +{ + return (uint64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsetq_lane_p64 (poly64_t __a, poly64x2_t __b, const 
int __c) +{ + return (poly64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c); +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_p64 (uint64_t __a) +{ + return (poly64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_s8 (uint64_t __a) +{ + return (int8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_s16 (uint64_t __a) +{ + return (int16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_s32 (uint64_t __a) +{ + return (int32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_s64 (uint64_t __a) +{ + return (int64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_f16 (uint64_t __a) +{ + return (float16x4_t) __a; +} +#endif + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_f32 (uint64_t __a) +{ + return (float32x2_t)__builtin_neon_vcreatev2sf ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_u8 (uint64_t __a) +{ + return (uint8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_u16 (uint64_t __a) +{ + return (uint16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_u32 (uint64_t __a) +{ + return (uint32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_u64 (uint64_t __a) +{ + return (uint64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_p8 (uint64_t __a) +{ + return (poly8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcreate_p16 (uint64_t __a) +{ + return (poly16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_s8 (int8_t __a) +{ + return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_s16 (int16_t __a) +{ + return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline int32x2_t +__attribute__ 
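/* [Editor's note] vcreate_* reinterprets a 64-bit scalar as a vector;
   on a little-endian target lane 0 comes from the least significant
   bits.  Illustrative sketch, not part of the imported header:  */
#if 0
#include <arm_neon.h>

int16x4_t
create_demo (void)
{
  /* Little-endian lanes: {0x0001, 0x0002, 0x0003, 0x0004}.  */
  return vcreate_s16 (0x0004000300020001ULL);
}
#endif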
((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_s32 (int32_t __a) +{ + return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_f32 (float32_t __a) +{ + return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_u8 (uint8_t __a) +{ + return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_u16 (uint16_t __a) +{ + return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_u32 (uint32_t __a) +{ + return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_p8 (poly8_t __a) +{ + return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_p16 (poly16_t __a) +{ + return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_p64 (poly64_t __a) +{ + return (poly64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_s64 (int64_t __a) +{ + return (int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_u64 (uint64_t __a) +{ + return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_p64 (poly64_t __a) +{ + return (poly64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_s8 (int8_t __a) +{ + return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_s16 (int16_t __a) +{ + return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_s32 (int32_t __a) +{ + return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_f32 (float32_t __a) +{ + return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_u8 
(uint8_t __a) +{ + return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_u16 (uint16_t __a) +{ + return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_u32 (uint32_t __a) +{ + return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_p8 (poly8_t __a) +{ + return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_p16 (poly16_t __a) +{ + return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_s64 (int64_t __a) +{ + return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_u64 (uint64_t __a) +{ + return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_s8 (int8_t __a) +{ + return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_s16 (int16_t __a) +{ + return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_s32 (int32_t __a) +{ + return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_f32 (float32_t __a) +{ + return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_u8 (uint8_t __a) +{ + return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_u16 (uint16_t __a) +{ + return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_u32 (uint32_t __a) +{ + return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_p8 (poly8_t __a) +{ + return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_p16 (poly16_t __a) +{ + return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_s64 (int64_t __a) +{ + return 
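/* [Editor's note] vdup_n_* and vdupq_n_* broadcast one scalar into
   every lane; the vmov_n_* functions below behave identically.
   Illustrative sketch, not part of the imported header:  */
#if 0
#include <arm_neon.h>

uint8x16_t
splat_demo (uint8_t byte)
{
  /* All sixteen lanes hold the same byte.  */
  return vdupq_n_u8 (byte);
}
#endif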
(int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_u64 (uint64_t __a) +{ + return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_s8 (int8_t __a) +{ + return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_s16 (int16_t __a) +{ + return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_s32 (int32_t __a) +{ + return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_f32 (float32_t __a) +{ + return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_u8 (uint8_t __a) +{ + return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_u16 (uint16_t __a) +{ + return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_u32 (uint32_t __a) +{ + return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_p8 (poly8_t __a) +{ + return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_p16 (poly16_t __a) +{ + return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_s64 (int64_t __a) +{ + return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_u64 (uint64_t __a) +{ + return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_s8 (int8x8_t __a, const int __b) +{ + return (int8x8_t)__builtin_neon_vdup_lanev8qi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_s16 (int16x4_t __a, const int __b) +{ + return (int16x4_t)__builtin_neon_vdup_lanev4hi (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_s32 (int32x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vdup_lanev2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_f32 (float32x2_t __a, const int __b) +{ + return 
(float32x2_t)__builtin_neon_vdup_lanev2sf (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_u8 (uint8x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_u16 (uint16x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_u32 (uint32x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vdup_lanev2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_p8 (poly8x8_t __a, const int __b) +{ + return (poly8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_p16 (poly16x4_t __a, const int __b) +{ + return (poly16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_p64 (poly64x1_t __a, const int __b) +{ + return (poly64x1_t)__builtin_neon_vdup_lanedi (__a, __b); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_s64 (int64x1_t __a, const int __b) +{ + return (int64x1_t)__builtin_neon_vdup_lanedi (__a, __b); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_u64 (uint64x1_t __a, const int __b) +{ + return (uint64x1_t)__builtin_neon_vdup_lanedi ((int64x1_t) __a, __b); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_s8 (int8x8_t __a, const int __b) +{ + return (int8x16_t)__builtin_neon_vdup_lanev16qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_s16 (int16x4_t __a, const int __b) +{ + return (int16x8_t)__builtin_neon_vdup_lanev8hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_s32 (int32x2_t __a, const int __b) +{ + return (int32x4_t)__builtin_neon_vdup_lanev4si (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_f32 (float32x2_t __a, const int __b) +{ + return (float32x4_t)__builtin_neon_vdup_lanev4sf (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_u8 (uint8x8_t __a, const int __b) +{ + return (uint8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_u16 (uint16x4_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_u32 
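/* [Editor's note] vdup_lane_* broadcasts one constant-indexed lane of
   a 64-bit vector into every lane of the result; the q-forms widen
   the result to 128 bits while still indexing a 64-bit source.
   Illustrative sketch, not part of the imported header:  */
#if 0
#include <arm_neon.h>

int16x8_t
dup_lane_demo (int16x4_t v)
{
  /* All eight result lanes hold lane 3 of v.  */
  return vdupq_lane_s16 (v, 3);
}
#endif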
(uint32x2_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vdup_lanev4si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_p8 (poly8x8_t __a, const int __b) +{ + return (poly8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_p16 (poly16x4_t __a, const int __b) +{ + return (poly16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_p64 (poly64x1_t __a, const int __b) +{ + return (poly64x2_t)__builtin_neon_vdup_lanev2di (__a, __b); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_s64 (int64x1_t __a, const int __b) +{ + return (int64x2_t)__builtin_neon_vdup_lanev2di (__a, __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_u64 (uint64x1_t __a, const int __b) +{ + return (uint64x2_t)__builtin_neon_vdup_lanev2di ((int64x1_t) __a, __b); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_p64 (poly64x1_t __a, poly64x1_t __b) +{ + return (poly64x2_t)__builtin_neon_vcombinedi (__a, __b); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x16_t)__builtin_neon_vcombinev8qi (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_s16 (int16x4_t __a, int16x4_t __b) +{ + return (int16x8_t)__builtin_neon_vcombinev4hi (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_s32 (int32x2_t __a, int32x2_t __b) +{ + return (int32x4_t)__builtin_neon_vcombinev2si (__a, __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_s64 (int64x1_t __a, int64x1_t __b) +{ + return (int64x2_t)__builtin_neon_vcombinedi (__a, __b); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vcombinev4hf (__a, __b); +} +#endif + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_f32 (float32x2_t __a, float32x2_t __b) +{ + return (float32x4_t)__builtin_neon_vcombinev2sf (__a, __b); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_u16 (uint16x4_t __a, uint16x4_t __b) +{ + 
return (uint16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return (uint32x4_t)__builtin_neon_vcombinev2si ((int32x2_t) __a, (int32x2_t) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return (uint64x2_t)__builtin_neon_vcombinedi ((int64x1_t) __a, (int64x1_t) __b); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_p8 (poly8x8_t __a, poly8x8_t __b) +{ + return (poly8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcombine_p16 (poly16x4_t __a, poly16x4_t __b) +{ + return (poly16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_p64 (poly64x2_t __a) +{ + return (poly64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_s8 (int8x16_t __a) +{ + return (int8x8_t)__builtin_neon_vget_highv16qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_s16 (int16x8_t __a) +{ + return (int16x4_t)__builtin_neon_vget_highv8hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_s32 (int32x4_t __a) +{ + return (int32x2_t)__builtin_neon_vget_highv4si (__a); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_s64 (int64x2_t __a) +{ + return (int64x1_t)__builtin_neon_vget_highv2di (__a); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_f16 (float16x8_t __a) +{ + return __builtin_neon_vget_highv8hf (__a); +} +#endif + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_f32 (float32x4_t __a) +{ + return (float32x2_t)__builtin_neon_vget_highv4sf (__a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_u8 (uint8x16_t __a) +{ + return (uint8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_u16 (uint16x8_t __a) +{ + return (uint16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_u32 (uint32x4_t __a) +{ + return (uint32x2_t)__builtin_neon_vget_highv4si ((int32x4_t) __a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_u64 (uint64x2_t __a) +{ + return 
(uint64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_p8 (poly8x16_t __a) +{ + return (poly8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_high_p16 (poly16x8_t __a) +{ + return (poly16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_s8 (int8x16_t __a) +{ + return (int8x8_t)__builtin_neon_vget_lowv16qi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_s16 (int16x8_t __a) +{ + return (int16x4_t)__builtin_neon_vget_lowv8hi (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_s32 (int32x4_t __a) +{ + return (int32x2_t)__builtin_neon_vget_lowv4si (__a); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_f16 (float16x8_t __a) +{ + return __builtin_neon_vget_lowv8hf (__a); +} +#endif + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_f32 (float32x4_t __a) +{ + return (float32x2_t)__builtin_neon_vget_lowv4sf (__a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_u8 (uint8x16_t __a) +{ + return (uint8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_u16 (uint16x8_t __a) +{ + return (uint16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_u32 (uint32x4_t __a) +{ + return (uint32x2_t)__builtin_neon_vget_lowv4si ((int32x4_t) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_p8 (poly8x16_t __a) +{ + return (poly8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_p16 (poly16x8_t __a) +{ + return (poly16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_p64 (poly64x2_t __a) +{ + return (poly64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_s64 (int64x2_t __a) +{ + return (int64x1_t)__builtin_neon_vget_lowv2di (__a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vget_low_u64 (uint64x2_t __a) +{ + return (uint64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_s32_f32 
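/* [Editor's note] vcombine_* glues two 64-bit vectors into one
   128-bit vector (the first argument becomes the low half), and
   vget_low_* and vget_high_* undo it.  Illustrative round-trip
   sketch, not part of the imported header:  */
#if 0
#include <arm_neon.h>

uint32x4_t
swap_halves (uint32x4_t q)
{
  /* Exchange the two 64-bit halves of q.  */
  return vcombine_u32 (vget_high_u32 (q), vget_low_u32 (q));
}
#endif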
(float32x2_t __a) +{ + return (int32x2_t)__builtin_neon_vcvtsv2sf (__a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_f32_s32 (int32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vcvtsv2si (__a); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_f32_u32 (uint32x2_t __a) +{ + return (float32x2_t)__builtin_neon_vcvtuv2si ((int32x2_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_u32_f32 (float32x2_t __a) +{ + return (uint32x2_t)__builtin_neon_vcvtuv2sf (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_s32_f32 (float32x4_t __a) +{ + return (int32x4_t)__builtin_neon_vcvtsv4sf (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_f32_s32 (int32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vcvtsv4si (__a); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_f32_u32 (uint32x4_t __a) +{ + return (float32x4_t)__builtin_neon_vcvtuv4si ((int32x4_t) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_u32_f32 (float32x4_t __a) +{ + return (uint32x4_t)__builtin_neon_vcvtuv4sf (__a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=neon-fp16") +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_f16_f32 (float32x4_t __a) +{ + return (float16x4_t)__builtin_neon_vcvtv4hfv4sf (__a); +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_f32_f16 (float16x4_t __a) +{ + return (float32x4_t)__builtin_neon_vcvtv4sfv4hf (__a); +} +#endif +#pragma GCC pop_options + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_s32_f32 (float32x2_t __a, const int __b) +{ + return (int32x2_t)__builtin_neon_vcvts_nv2sf (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_f32_s32 (int32x2_t __a, const int __b) +{ + return (float32x2_t)__builtin_neon_vcvts_nv2si (__a, __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_f32_u32 (uint32x2_t __a, const int __b) +{ + return (float32x2_t)__builtin_neon_vcvtu_nv2si ((int32x2_t) __a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_u32_f32 (float32x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_neon_vcvtu_nv2sf (__a, __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_s32_f32 (float32x4_t __a, const int __b) +{ + return (int32x4_t)__builtin_neon_vcvts_nv4sf (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_f32_s32 (int32x4_t __a, const int __b) +{ + return 
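/* [Editor's note] vcvt_s32_f32 truncates toward zero and
   vcvt_f32_s32 converts back; the f16 pair above converts between
   half and single precision four lanes at a time.  Illustrative
   sketch, not part of the imported header:  */
#if 0
#include <arm_neon.h>

int32x4_t
truncate_demo (float32x4_t v)
{
  /* {1.9f, -1.9f, ...} becomes {1, -1, ...}.  */
  return vcvtq_s32_f32 (v);
}
#endif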
(float32x4_t)__builtin_neon_vcvts_nv4si (__a, __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_f32_u32 (uint32x4_t __a, const int __b) +{ + return (float32x4_t)__builtin_neon_vcvtu_nv4si ((int32x4_t) __a, __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_u32_f32 (float32x4_t __a, const int __b) +{ + return (uint32x4_t)__builtin_neon_vcvtu_nv4sf (__a, __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovn_s16 (int16x8_t __a) +{ + return (int8x8_t)__builtin_neon_vmovnv8hi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovn_s32 (int32x4_t __a) +{ + return (int16x4_t)__builtin_neon_vmovnv4si (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovn_s64 (int64x2_t __a) +{ + return (int32x2_t)__builtin_neon_vmovnv2di (__a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovn_u16 (uint16x8_t __a) +{ + return (uint8x8_t)__builtin_neon_vmovnv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovn_u32 (uint32x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vmovnv4si ((int32x4_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovn_u64 (uint64x2_t __a) +{ + return (uint32x2_t)__builtin_neon_vmovnv2di ((int64x2_t) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovn_s16 (int16x8_t __a) +{ + return (int8x8_t)__builtin_neon_vqmovnsv8hi (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovn_s32 (int32x4_t __a) +{ + return (int16x4_t)__builtin_neon_vqmovnsv4si (__a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovn_s64 (int64x2_t __a) +{ + return (int32x2_t)__builtin_neon_vqmovnsv2di (__a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovn_u16 (uint16x8_t __a) +{ + return (uint8x8_t)__builtin_neon_vqmovnuv8hi ((int16x8_t) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovn_u32 (uint32x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vqmovnuv4si ((int32x4_t) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovn_u64 (uint64x2_t __a) +{ + return (uint32x2_t)__builtin_neon_vqmovnuv2di ((int64x2_t) __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovun_s16 (int16x8_t __a) +{ + return (uint8x8_t)__builtin_neon_vqmovunv8hi (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovun_s32 (int32x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vqmovunv4si (__a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqmovun_s64 (int64x2_t __a) +{ + return 
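/* [Editor's note] vmovn_* narrows each lane by dropping its high
   half, vqmovn_* saturates instead, and vqmovun_* saturates signed
   input into an unsigned result; the vcvt_n_* functions just above
   treat the integer side as fixed point with the given number of
   fraction bits.  Illustrative sketch, not part of the imported
   header:  */
#if 0
#include <arm_neon.h>

uint8x8_t
clamp_to_bytes (int16x8_t v)
{
  /* Lanes below 0 become 0, lanes above 255 become 255.  */
  return vqmovun_s16 (v);
}
#endif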
(uint32x2_t)__builtin_neon_vqmovunv2di (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovl_s8 (int8x8_t __a) +{ + return (int16x8_t)__builtin_neon_vmovlsv8qi (__a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovl_s16 (int16x4_t __a) +{ + return (int32x4_t)__builtin_neon_vmovlsv4hi (__a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovl_s32 (int32x2_t __a) +{ + return (int64x2_t)__builtin_neon_vmovlsv2si (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovl_u8 (uint8x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vmovluv8qi ((int8x8_t) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovl_u16 (uint16x4_t __a) +{ + return (uint32x4_t)__builtin_neon_vmovluv4hi ((int16x4_t) __a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovl_u32 (uint32x2_t __a) +{ + return (uint64x2_t)__builtin_neon_vmovluv2si ((int32x2_t) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl1_s8 (int8x8_t __a, int8x8_t __b) +{ + return (int8x8_t)__builtin_neon_vtbl1v8qi (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl1_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return (uint8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl1_p8 (poly8x8_t __a, uint8x8_t __b) +{ + return (poly8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl2_s8 (int8x8x2_t __a, int8x8_t __b) +{ + union { int8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a }; + return (int8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl2_u8 (uint8x8x2_t __a, uint8x8_t __b) +{ + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a }; + return (uint8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl2_p8 (poly8x8x2_t __a, uint8x8_t __b) +{ + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a }; + return (poly8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl3_s8 (int8x8x3_t __a, int8x8_t __b) +{ + union { int8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a }; + return (int8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl3_u8 (uint8x8x3_t __a, uint8x8_t __b) +{ + union { uint8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a }; + return (uint8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
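/* [Editor's note] vmovl_* widens each lane to twice its size,
   sign-extending in the signed forms and zero-extending in the
   unsigned ones.  Illustrative sketch, not part of the imported
   header:  */
#if 0
#include <arm_neon.h>

uint16x8_t
widen_demo (uint8x8_t v)
{
  /* Each byte is zero-extended into a 16-bit lane.  */
  return vmovl_u8 (v);
}
#endif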
+vtbl3_p8 (poly8x8x3_t __a, uint8x8_t __b) +{ + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a }; + return (poly8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl4_s8 (int8x8x4_t __a, int8x8_t __b) +{ + union { int8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a }; + return (int8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl4_u8 (uint8x8x4_t __a, uint8x8_t __b) +{ + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a }; + return (uint8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbl4_p8 (poly8x8x4_t __a, uint8x8_t __b) +{ + union { poly8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a }; + return (poly8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx1_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c) +{ + return (int8x8_t)__builtin_neon_vtbx1v8qi (__a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx1_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c) +{ + return (uint8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx1_p8 (poly8x8_t __a, poly8x8_t __b, uint8x8_t __c) +{ + return (poly8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx2_s8 (int8x8_t __a, int8x8x2_t __b, int8x8_t __c) +{ + union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + return (int8x8_t)__builtin_neon_vtbx2v8qi (__a, __bu.__o, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx2_u8 (uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c) +{ + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + return (uint8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx2_p8 (poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c) +{ + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + return (poly8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx3_s8 (int8x8_t __a, int8x8x3_t __b, int8x8_t __c) +{ + union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + return (int8x8_t)__builtin_neon_vtbx3v8qi (__a, __bu.__o, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx3_u8 (uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c) +{ + union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + return (uint8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, 
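/* [Editor's note] vtbl looks each index byte up in an 8- to 32-byte
   table, producing 0 for out-of-range indices, while the vtbx forms
   leave the corresponding lane of their first argument unchanged
   instead.  Illustrative byte-reversal sketch, assuming a
   little-endian target; not part of the imported header:  */
#if 0
#include <arm_neon.h>

uint8x8_t
reverse_bytes (uint8x8_t v)
{
  /* Index vector lanes are {7,6,5,4,3,2,1,0} on little-endian.  */
  return vtbl1_u8 (v, vcreate_u8 (0x0001020304050607ULL));
}
#endif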
__artificial__)) +vtbx3_p8 (poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c) +{ + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + return (poly8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx4_s8 (int8x8_t __a, int8x8x4_t __b, int8x8_t __c) +{ + union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + return (int8x8_t)__builtin_neon_vtbx4v8qi (__a, __bu.__o, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx4_u8 (uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c) +{ + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + return (uint8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtbx4_p8 (poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c) +{ + union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + return (poly8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vmul_lanev4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vmul_lanev2si (__a, __b, __c); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __c) +{ + return (float32x2_t)__builtin_neon_vmul_lanev2sf (__a, __b, __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return (uint16x4_t)__builtin_neon_vmul_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return (uint32x2_t)__builtin_neon_vmul_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vmul_lanev8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vmul_lanev4si (__a, __b, __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __c) +{ + return (float32x4_t)__builtin_neon_vmul_lanev4sf (__a, __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __c) +{ + return (uint16x8_t)__builtin_neon_vmul_lanev8hi ((int16x8_t) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline 
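/* [Editor's note] The _lane multiplies use one constant-indexed lane
   of the second vector as a scalar multiplier for every lane of the
   first, which keeps a whole coefficient vector in a register across
   a loop.  Illustrative sketch, not part of the imported header:  */
#if 0
#include <arm_neon.h>

float32x4_t
scale_by_coeff (float32x4_t v, float32x2_t coeffs)
{
  /* Multiply all four lanes of v by lane 0 of coeffs.  */
  return vmulq_lane_f32 (v, coeffs, 0);
}
#endif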
uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vmul_lanev4si ((int32x4_t) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int16x4_t)__builtin_neon_vmla_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return (int32x2_t)__builtin_neon_vmla_lanev2si (__a, __b, __c, __d); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d) +{ + return (float32x2_t)__builtin_neon_vmla_lanev2sf (__a, __b, __c, __d); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d) +{ + return (uint16x4_t)__builtin_neon_vmla_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d) +{ + return (uint32x2_t)__builtin_neon_vmla_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d) +{ + return (int16x8_t)__builtin_neon_vmla_lanev8hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vmla_lanev4si (__a, __b, __c, __d); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d) +{ + return (float32x4_t)__builtin_neon_vmla_lanev4sf (__a, __b, __c, __d); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d) +{ + return (uint16x8_t)__builtin_neon_vmla_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d) +{ + return (uint32x4_t)__builtin_neon_vmla_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vmlals_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return 
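/* [Editor's note] vmla_lane_* computes a + b * c[lane] per lane, and
   the vmlal forms that follow also widen the product, so a chain of
   them evaluates a short FIR filter without reloading coefficients.
   Illustrative sketch, not part of the imported header:  */
#if 0
#include <arm_neon.h>

int32x4_t
acc_demo (int32x4_t acc, int16x4_t samples, int16x4_t coeffs)
{
  /* acc += (int32) samples * (int32) coeffs[2], lane by lane.  */
  return vmlal_lane_s16 (acc, samples, coeffs, 2);
}
#endif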
(int64x2_t)__builtin_neon_vmlals_lanev2si (__a, __b, __c, __d); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d) +{ + return (uint32x4_t)__builtin_neon_vmlalu_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d) +{ + return (uint64x2_t)__builtin_neon_vmlalu_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return (int64x2_t)__builtin_neon_vqdmlal_lanev2si (__a, __b, __c, __d); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int16x4_t)__builtin_neon_vmls_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return (int32x2_t)__builtin_neon_vmls_lanev2si (__a, __b, __c, __d); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d) +{ + return (float32x2_t)__builtin_neon_vmls_lanev2sf (__a, __b, __c, __d); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d) +{ + return (uint16x4_t)__builtin_neon_vmls_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d) +{ + return (uint32x2_t)__builtin_neon_vmls_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d) +{ + return (int16x8_t)__builtin_neon_vmls_lanev8hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vmls_lanev4si (__a, __b, __c, __d); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d) +{ + return (float32x4_t)__builtin_neon_vmls_lanev4sf (__a, __b, __c, __d); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d) +{ + return (uint16x8_t)__builtin_neon_vmls_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d) +{ + return (uint32x4_t)__builtin_neon_vmls_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vmlsls_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return (int64x2_t)__builtin_neon_vmlsls_lanev2si (__a, __b, __c, __d); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d) +{ + return (uint32x4_t)__builtin_neon_vmlslu_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d) +{ + return (uint64x2_t)__builtin_neon_vmlslu_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vmulls_lanev4hi (__a, __b, __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vmulls_lanev2si (__a, __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vmullu_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c) +{ + return (uint64x2_t)__builtin_neon_vmullu_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vqdmull_lanev4hi (__a, __b, __c); +} + +__extension__ extern __inline 
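/* [Editor's note] vmull_lane_* produces double-width products, and
   the vqdmull forms double and saturate them, matching Q15/Q31
   fixed-point conventions.  Illustrative sketch, not part of the
   imported header:  */
#if 0
#include <arm_neon.h>

int32x4_t
q15_widen_mul (int16x4_t a, int16x4_t b)
{
  /* Saturating (a * b[1]) << 1 per lane, as 32-bit results.  */
  return vqdmull_lane_s16 (a, b, 1);
}
#endif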
int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vqdmull_lanev2si (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vqrdmulh_lanev8hi (__a, __b, __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vqrdmulh_lanev4si (__a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vqrdmulh_lanev4hi (__a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vqrdmulh_lanev2si (__a, __b, __c); +} + +#ifdef __ARM_FEATURE_QRDMX +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlahq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d) +{ + return (int16x8_t)__builtin_neon_vqrdmlah_lanev8hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlahq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vqrdmlah_lanev4si (__a, __b, __c, __d); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlah_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int16x4_t)__builtin_neon_vqrdmlah_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlah_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return (int32x2_t)__builtin_neon_vqrdmlah_lanev2si (__a, __b, __c, __d); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlshq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d) 
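+  /* Editorial note, not part of the imported GCC sources: vqrdmlshq is
+     the subtracting counterpart of vqrdmlahq above; per lane it computes
+     roughly sat (__a[i] - ((2 * __b[i] * __c[__d] + (1 << 15)) >> 16))
+     for the 16-bit variants.  */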
+{ + return (int16x8_t)__builtin_neon_vqrdmlsh_lanev8hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlshq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d) +{ + return (int32x4_t)__builtin_neon_vqrdmlsh_lanev4si (__a, __b, __c, __d); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlsh_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d) +{ + return (int16x4_t)__builtin_neon_vqrdmlsh_lanev4hi (__a, __b, __c, __d); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmlsh_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d) +{ + return (int32x2_t)__builtin_neon_vqrdmlsh_lanev2si (__a, __b, __c, __d); +} +#endif + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_n_s16 (int16x4_t __a, int16_t __b) +{ + return (int16x4_t)__builtin_neon_vmul_nv4hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_n_s32 (int32x2_t __a, int32_t __b) +{ + return (int32x2_t)__builtin_neon_vmul_nv2si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_n_f32 (float32x2_t __a, float32_t __b) +{ + return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, (__builtin_neon_sf) __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_n_u16 (uint16x4_t __a, uint16_t __b) +{ + return (uint16x4_t)__builtin_neon_vmul_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_n_u32 (uint32x2_t __a, uint32_t __b) +{ + return (uint32x2_t)__builtin_neon_vmul_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_n_s16 (int16x8_t __a, int16_t __b) +{ + return (int16x8_t)__builtin_neon_vmul_nv8hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_n_s32 (int32x4_t __a, int32_t __b) +{ + return (int32x4_t)__builtin_neon_vmul_nv4si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_n_f32 (float32x4_t __a, float32_t __b) +{ + return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, (__builtin_neon_sf) __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_n_u16 (uint16x8_t __a, uint16_t __b) +{ + return (uint16x8_t)__builtin_neon_vmul_nv8hi ((int16x8_t) __a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_n_u32 (uint32x4_t __a, uint32_t __b) +{ + return (uint32x4_t)__builtin_neon_vmul_nv4si ((int32x4_t) __a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_n_s16 (int16x4_t __a, int16_t __b) +{ + return 
(int32x4_t)__builtin_neon_vmulls_nv4hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_n_s32 (int32x2_t __a, int32_t __b) +{ + return (int64x2_t)__builtin_neon_vmulls_nv2si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_n_u16 (uint16x4_t __a, uint16_t __b) +{ + return (uint32x4_t)__builtin_neon_vmullu_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_n_u32 (uint32x2_t __a, uint32_t __b) +{ + return (uint64x2_t)__builtin_neon_vmullu_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmull_n_s16 (int16x4_t __a, int16_t __b) +{ + return (int32x4_t)__builtin_neon_vqdmull_nv4hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmull_n_s32 (int32x2_t __a, int32_t __b) +{ + return (int64x2_t)__builtin_neon_vqdmull_nv2si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulhq_n_s16 (int16x8_t __a, int16_t __b) +{ + return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulhq_n_s32 (int32x4_t __a, int32_t __b) +{ + return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulh_n_s16 (int16x4_t __a, int16_t __b) +{ + return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmulh_n_s32 (int32x2_t __a, int32_t __b) +{ + return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b) +{ + return (int16x8_t)__builtin_neon_vqrdmulh_nv8hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b) +{ + return (int32x4_t)__builtin_neon_vqrdmulh_nv4si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulh_n_s16 (int16x4_t __a, int16_t __b) +{ + return (int16x4_t)__builtin_neon_vqrdmulh_nv4hi (__a, (__builtin_neon_hi) __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqrdmulh_n_s32 (int32x2_t __a, int32_t __b) +{ + return (int32x2_t)__builtin_neon_vqrdmulh_nv2si (__a, (__builtin_neon_si) __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c) +{ + return (int16x4_t)__builtin_neon_vmla_nv4hi (__a, __b, (__builtin_neon_hi) __c); +} + +__extension__ extern 
__inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c) +{ + return (int32x2_t)__builtin_neon_vmla_nv2si (__a, __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c) +{ + return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, (__builtin_neon_sf) __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c) +{ + return (uint16x4_t)__builtin_neon_vmla_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c) +{ + return (uint32x2_t)__builtin_neon_vmla_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return (int16x8_t)__builtin_neon_vmla_nv8hi (__a, __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return (int32x4_t)__builtin_neon_vmla_nv4si (__a, __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c) +{ + return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, (__builtin_neon_sf) __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return (uint16x8_t)__builtin_neon_vmla_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return (uint32x4_t)__builtin_neon_vmla_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c) +{ + return (int32x4_t)__builtin_neon_vmlals_nv4hi (__a, __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c) +{ + return (int64x2_t)__builtin_neon_vmlals_nv2si (__a, __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c) +{ + return (uint32x4_t)__builtin_neon_vmlalu_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c) +{ + return (uint64x2_t)__builtin_neon_vmlalu_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c); +} + +__extension__ extern 
__inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c) +{ + return (int32x4_t)__builtin_neon_vqdmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c) +{ + return (int64x2_t)__builtin_neon_vqdmlal_nv2si (__a, __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c) +{ + return (int16x4_t)__builtin_neon_vmls_nv4hi (__a, __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c) +{ + return (int32x2_t)__builtin_neon_vmls_nv2si (__a, __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c) +{ + return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, (__builtin_neon_sf) __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c) +{ + return (uint16x4_t)__builtin_neon_vmls_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c) +{ + return (uint32x2_t)__builtin_neon_vmls_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c) +{ + return (int16x8_t)__builtin_neon_vmls_nv8hi (__a, __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c) +{ + return (int32x4_t)__builtin_neon_vmls_nv4si (__a, __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c) +{ + return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, (__builtin_neon_sf) __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c) +{ + return (uint16x8_t)__builtin_neon_vmls_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c) +{ + return (uint32x4_t)__builtin_neon_vmls_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c) +{ + return (int32x4_t)__builtin_neon_vmlsls_nv4hi (__a, __b, (__builtin_neon_hi) __c); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__))
+vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+  return (int64x2_t)__builtin_neon_vmlsls_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+  return (uint32x4_t)__builtin_neon_vmlslu_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+  return (uint64x2_t)__builtin_neon_vmlslu_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+  return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+  return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c);
+}
+
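+/* Editorial illustration, not part of the imported GCC sources: the "_n"
+   intrinsics above broadcast their scalar operand to every lane.  A
+   minimal usage sketch; the guard macro and the function name are
+   hypothetical and the block is never compiled here:  */
+#ifdef __NEON_EDITORIAL_EXAMPLES
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__example_widening_scale_acc (int32x4_t __acc, int16x4_t __v, int16_t __k)
+{
+  /* Lane i of the result is __acc[i] + (int32_t) __v[i] * __k.  */
+  return vmlal_n_s16 (__acc, __v, __k);
+}
+#endif
+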
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+  return (poly64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+  return (int8x8_t)__builtin_neon_vextv8qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  return (int16x4_t)__builtin_neon_vextv4hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  return (int32x2_t)__builtin_neon_vextv2si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+  return (int64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+  return (float32x2_t)__builtin_neon_vextv2sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+  return (uint8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+  return (uint16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+  return (uint32x2_t)__builtin_neon_vextv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+  return (uint64x1_t)__builtin_neon_vextdi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+  return (poly8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+  return (poly16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+  return (poly64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+  return (int8x16_t)__builtin_neon_vextv16qi (__a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+  return (int16x8_t)__builtin_neon_vextv8hi (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+  return (int32x4_t)__builtin_neon_vextv4si (__a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+  return (int64x2_t)__builtin_neon_vextv2di (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_f32 (float32x4_t __a, float32x4_t __b, const int __c)
+{
+  return (float32x4_t)__builtin_neon_vextv4sf (__a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+  return (uint8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+  return (uint16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+  return (uint32x4_t)__builtin_neon_vextv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+  return (uint64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+  return (poly8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+  return (poly16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
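+/* Editorial illustration, not part of the imported GCC sources: vext
+   concatenates its two operands and extracts a window of consecutive
+   lanes, so lane i of vext_s8 (__a, __b, __c) is lane (__c + i) of
+   __a:__b.  A rotate is the classic use; the guard macro and function
+   name are hypothetical and the block is never compiled here:  */
+#ifdef __NEON_EDITORIAL_EXAMPLES
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__example_rotate_left_one (int8x8_t __v)
+{
+  /* Passing the same vector twice yields { v1, ..., v7, v0 }.  */
+  return vext_s8 (__v, __v, 1);
+}
+#endif
+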
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_s8 (int8x8_t __a)
+{
+  return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_s16 (int16x4_t __a)
+{
+  return (int16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_s32 (int32x2_t __a)
+{
+  return (int32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_f32 (float32x2_t __a)
+{
+  return (float32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_u8 (uint8x8_t __a)
+{
+  return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_u16 (uint16x4_t __a)
+{
+  return (uint16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_u32 (uint32x2_t __a)
+{
+  return (uint32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_p8 (poly8x8_t __a)
+{
+  return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_p16 (poly16x4_t __a)
+{
+  return (poly16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_s8 (int8x16_t __a)
+{
+  return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_s16 (int16x8_t __a)
+{
+  return (int16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_s32 (int32x4_t __a)
+{
+  return (int32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_f32 (float32x4_t __a)
+{
+  return (float32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_u8 (uint8x16_t __a)
+{
+  return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_u16 (uint16x8_t __a)
+{
+  return (uint16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_u32 (uint32x4_t __a)
+{
+  return (uint32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_p8 (poly8x16_t __a)
+{
+  return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_p16 (poly16x8_t __a)
+{
+  return (poly16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
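+/* Editorial illustration, not part of the imported GCC sources: the
+   vrev64 intrinsics above reverse the lanes within each 64-bit chunk, as
+   their shuffle masks spell out.  A byte-reversal sketch; the guard
+   macro and function name are hypothetical:  */
+#ifdef __NEON_EDITORIAL_EXAMPLES
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__example_reverse_bytes64 (uint8x8_t __v)
+{
+  /* { v7, v6, v5, v4, v3, v2, v1, v0 }: all eight bytes reversed.  */
+  return vrev64_u8 (__v);
+}
+#endif
+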
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_s8 (int8x8_t __a)
+{
+  return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_s16 (int16x4_t __a)
+{
+  return (int16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_u8 (uint8x8_t __a)
+{
+  return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_u16 (uint16x4_t __a)
+{
+  return (uint16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_p8 (poly8x8_t __a)
+{
+  return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32_p16 (poly16x4_t __a)
+{
+  return (poly16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_s8 (int8x16_t __a)
+{
+  return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_s16 (int16x8_t __a)
+{
+  return (int16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_u8 (uint8x16_t __a)
+{
+  return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_u16 (uint16x8_t __a)
+{
+  return (uint16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_p8 (poly8x16_t __a)
+{
+  return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev32q_p16 (poly16x8_t __a)
+{
+  return (poly16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16_s8 (int8x8_t __a)
+{
+  return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16_u8 (uint8x8_t __a)
+{
+  return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16_p8 (poly8x8_t __a)
+{
+  return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16q_s8 (int8x16_t __a)
+{
+  return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16q_u8 (uint8x16_t __a)
+{
+  return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev16q_p8 (poly8x16_t __a)
+{
+  return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
+{
+  return (poly64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+  return (int8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+  return (int16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+  return (int32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
+{
+  return (int64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+  return (float32x2_t)__builtin_neon_vbslv2sf ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+  return (uint8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+  return (uint16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+  return (uint32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
+{
+  return (uint64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, (int64x1_t) __b, (int64x1_t) __c);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
+{
+  return (poly8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
+{
+  return (poly16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
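+/* Editorial illustration, not part of the imported GCC sources: vbsl
+   selects individual bits, not whole lanes: each result bit comes from
+   the second operand where the mask bit is 1 and from the third operand
+   where it is 0.  Guard macro and function name are hypothetical:  */
+#ifdef __NEON_EDITORIAL_EXAMPLES
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__example_select (uint32x2_t __mask, float32x2_t __x, float32x2_t __y)
+{
+  /* With all-ones/all-zeros mask lanes this acts as a lane-wise select.  */
+  return vbsl_f32 (__mask, __x, __y);
+}
+#endif
+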
+#pragma GCC push_options
+#pragma GCC target ("fpu=crypto-neon-fp-armv8")
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
+{
+  return (poly64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+#pragma GCC pop_options
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+  return (int8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+  return (int16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+  return (int32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+  return (int64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+  return (float32x4_t)__builtin_neon_vbslv4sf ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+  return (uint8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+  return (uint16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+  return (uint32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+  return (uint64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
+{
+  return (poly8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
+{
+  return (poly16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+/* For big-endian, the shuffle masks for ZIP, UZP and TRN must be changed as
+   follows. (nelt = the number of elements within a vector.)
+
+   Firstly, a value of N within a mask becomes (N ^ (nelt - 1)), as gcc vector
+   extension's indexing scheme is reversed *within each vector* (relative to the
+   neon intrinsics view), but without changing which of the two vectors.
+
+   Secondly, the elements within each mask are reversed, as the mask is itself a
+   vector, and will itself be loaded in reverse order (again, relative to the
+   neon intrinsics view, i.e. that would result from a "vld1" instruction). */
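+
+/* Editorial worked example, not part of the original GCC comment: with
+   nelt = 4, the little-endian val[0] mask { 0, 4, 2, 6 } of vtrn_u16
+   below becomes { 0 ^ 3, 4 ^ 3, 2 ^ 3, 6 ^ 3 } = { 3, 7, 1, 5 } under
+   the first rule; reversing those elements under the second rule yields
+   { 5, 1, 7, 3 }, exactly the mask in vtrn_u16's __ARM_BIG_ENDIAN
+   branch.  */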
+
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_s8 (int8x8_t __a, int8x8_t __b)
+{
+  int8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 9, 1, 11, 3, 13, 5, 15, 7 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 0, 8, 2, 10, 4, 12, 6, 14 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_s16 (int16x4_t __a, int16x4_t __b)
+{
+  int16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+  uint8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 9, 1, 11, 3, 13, 5, 15, 7 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 0, 8, 2, 10, 4, 12, 6, 14 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+  uint16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+  poly8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 9, 1, 11, 3, 13, 5, 15, 7 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 0, 8, 2, 10, 4, 12, 6, 14 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+    { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+  poly16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_s32 (int32x2_t __a, int32x2_t __b)
+{
+  int32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_f32 (float32x2_t __a, float32x2_t __b)
+{
+  float32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+  uint32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+  return __rv;
+}
+
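+/* Editorial illustration, not part of the imported GCC sources: vtrn
+   gathers even-indexed lane pairs into val[0] and odd-indexed pairs into
+   val[1], i.e. a 2x2 transpose of adjacent lanes.  Guard macro and
+   function name are hypothetical:  */
+#ifdef __NEON_EDITORIAL_EXAMPLES
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__example_transpose_pairs (uint16x4_t __x, uint16x4_t __y)
+{
+  /* Little-endian: val[0] = { x0, y0, x2, y2 }, val[1] = { x1, y1, x3, y3 }.  */
+  return vtrn_u16 (__x, __y);
+}
+#endif
+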
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_s8 (int8x16_t __a, int8x16_t __b)
+{
+  int8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+    { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+    { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+    { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+    { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_s16 (int16x8_t __a, int16x8_t __b)
+{
+  int16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+    { 9, 1, 11, 3, 13, 5, 15, 7 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+    { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+    { 0, 8, 2, 10, 4, 12, 6, 14 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+    { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_s32 (int32x4_t __a, int32x4_t __b)
+{
+  int32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 });
+#else
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 });
+  __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 });
+#endif
+  return __rv;
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  float32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+  __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) {
5, 1, 7, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline uint8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtrnq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + uint8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtrnq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + uint16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 9, 1, 11, 3, 13, 5, 15, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 8, 0, 10, 2, 12, 4, 14, 6 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 8, 2, 10, 4, 12, 6, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 1, 9, 3, 11, 5, 13, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtrnq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + uint32x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline poly8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtrnq_p8 (poly8x16_t __a, poly8x16_t __b) +{ + poly8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline poly16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtrnq_p16 (poly16x8_t __a, poly16x8_t __b) +{ + poly16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 9, 1, 11, 3, 13, 5, 15, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 8, 0, 10, 2, 12, 4, 14, 6 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 8, 2, 10, 4, 12, 6, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 1, 9, 3, 11, 5, 13, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline int8x8x2_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_s8 (int8x8_t __a, int8x8_t __b) +{ + int8x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 12, 4, 13, 5, 14, 6, 15, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 8, 0, 9, 1, 10, 2, 11, 3 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 0, 8, 1, 9, 2, 10, 3, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 4, 12, 5, 13, 6, 14, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline int16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_s16 (int16x4_t __a, int16x4_t __b) +{ + int16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline uint8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_u8 (uint8x8_t __a, uint8x8_t __b) +{ + uint8x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 12, 4, 13, 5, 14, 6, 15, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 8, 0, 9, 1, 10, 2, 11, 3 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 0, 8, 1, 9, 2, 10, 3, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 4, 12, 5, 13, 6, 14, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline uint16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_u16 (uint16x4_t __a, uint16x4_t __b) +{ + uint16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline poly8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_p8 (poly8x8_t __a, poly8x8_t __b) +{ + poly8x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 12, 4, 13, 5, 14, 6, 15, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 8, 0, 9, 1, 10, 2, 11, 3 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 0, 8, 1, 9, 2, 10, 3, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 4, 12, 5, 13, 6, 14, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline poly16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_p16 (poly16x4_t __a, poly16x4_t __b) +{ + poly16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline int32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_s32 (int32x2_t __a, int32x2_t __b) +{ + int32x2x2_t __rv; +#ifdef 
__ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 }); +#endif + return __rv; +} + +__extension__ extern __inline float32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_f32 (float32x2_t __a, float32x2_t __b) +{ + float32x2x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 }); +#endif + return __rv; +} + +__extension__ extern __inline uint32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_u32 (uint32x2_t __a, uint32x2_t __b) +{ + uint32x2x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 }); +#endif + return __rv; +} + +__extension__ extern __inline int8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_s8 (int8x16_t __a, int8x16_t __b) +{ + int8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 28, 12, 29, 13, 30, 14, 31, 15, 24, 8, 25, 9, 26, 10, 27, 11 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline int16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_s16 (int16x8_t __a, int16x8_t __b) +{ + int16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 10, 2, 11, 3, 8, 0, 9, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 14, 6, 15, 7, 12, 4, 13, 5 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 8, 1, 9, 2, 10, 3, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 4, 12, 5, 13, 6, 14, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline int32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_s32 (int32x4_t __a, int32x4_t __b) +{ + int32x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 4, 0 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 7, 3, 6, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline float32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_f32 (float32x4_t __a, float32x4_t __b) +{ + float32x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 4, 0 }); + __rv.val[1] = 
__builtin_shuffle (__a, __b, (uint32x4_t) { 7, 3, 6, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline uint8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + uint8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 28, 12, 29, 13, 30, 14, 31, 15, 24, 8, 25, 9, 26, 10, 27, 11 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + uint16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 10, 2, 11, 3, 8, 0, 9, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 14, 6, 15, 7, 12, 4, 13, 5 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 8, 1, 9, 2, 10, 3, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 4, 12, 5, 13, 6, 14, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + uint32x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 4, 0 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 7, 3, 6, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline poly8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_p8 (poly8x16_t __a, poly8x16_t __b) +{ + poly8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 28, 12, 29, 13, 30, 14, 31, 15, 24, 8, 25, 9, 26, 10, 27, 11 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline poly16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_p16 (poly16x8_t __a, poly16x8_t __b) +{ + poly16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 10, 2, 11, 3, 8, 0, 9, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 14, 6, 15, 7, 12, 4, 13, 5 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 8, 1, 9, 2, 10, 3, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 4, 12, 5, 13, 6, 14, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline int8x8x2_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vuzp_s8 (int8x8_t __a, int8x8_t __b) +{ + int8x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 9, 11, 13, 15, 1, 3, 5, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 8, 10, 12, 14, 0, 2, 4, 6 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 0, 2, 4, 6, 8, 10, 12, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 1, 3, 5, 7, 9, 11, 13, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline int16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_s16 (int16x4_t __a, int16x4_t __b) +{ + int16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline int32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_s32 (int32x2_t __a, int32x2_t __b) +{ + int32x2x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 }); +#endif + return __rv; +} + +__extension__ extern __inline float32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_f32 (float32x2_t __a, float32x2_t __b) +{ + float32x2x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 }); +#endif + return __rv; +} + +__extension__ extern __inline uint8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_u8 (uint8x8_t __a, uint8x8_t __b) +{ + uint8x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 9, 11, 13, 15, 1, 3, 5, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 8, 10, 12, 14, 0, 2, 4, 6 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 0, 2, 4, 6, 8, 10, 12, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 1, 3, 5, 7, 9, 11, 13, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline uint16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_u16 (uint16x4_t __a, uint16x4_t __b) +{ + uint16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline uint32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_u32 (uint32x2_t __a, uint32x2_t __b) +{ + uint32x2x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, 
(uint32x2_t) { 2, 0 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 }); +#endif + return __rv; +} + +__extension__ extern __inline poly8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_p8 (poly8x8_t __a, poly8x8_t __b) +{ + poly8x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 9, 11, 13, 15, 1, 3, 5, 7 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 8, 10, 12, 14, 0, 2, 4, 6 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 0, 2, 4, 6, 8, 10, 12, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t) + { 1, 3, 5, 7, 9, 11, 13, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline poly16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_p16 (poly16x4_t __a, poly16x4_t __b) +{ + poly16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline int8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_s8 (int8x16_t __a, int8x16_t __b) +{ + int8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 9, 11, 13, 15, 1, 3, 5, 7, 25, 27, 29, 31, 17, 19, 21, 23 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 8, 10, 12, 14, 0, 2, 4, 6, 24, 26, 28, 30, 16, 18, 20, 22 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline int16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_s16 (int16x8_t __a, int16x8_t __b) +{ + int16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 5, 7, 1, 3, 13, 15, 9, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 4, 6, 0, 2, 12, 14, 8, 10 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 2, 4, 6, 8, 10, 12, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 1, 3, 5, 7, 9, 11, 13, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline int32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_s32 (int32x4_t __a, int32x4_t __b) +{ + int32x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 3, 1, 7, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 0, 6, 4 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline float32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_f32 (float32x4_t __a, float32x4_t __b) +{ + float32x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 3, 1, 7, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 
2, 0, 6, 4 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline uint8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + uint8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 9, 11, 13, 15, 1, 3, 5, 7, 25, 27, 29, 31, 17, 19, 21, 23 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 8, 10, 12, 14, 0, 2, 4, 6, 24, 26, 28, 30, 16, 18, 20, 22 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + uint16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 5, 7, 1, 3, 13, 15, 9, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 4, 6, 0, 2, 12, 14, 8, 10 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 2, 4, 6, 8, 10, 12, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 1, 3, 5, 7, 9, 11, 13, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + uint32x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 3, 1, 7, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 0, 6, 4 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline poly8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_p8 (poly8x16_t __a, poly8x16_t __b) +{ + poly8x16x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 9, 11, 13, 15, 1, 3, 5, 7, 25, 27, 29, 31, 17, 19, 21, 23 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 8, 10, 12, 14, 0, 2, 4, 6, 24, 26, 28, 30, 16, 18, 20, 22 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t) + { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 }); +#endif + return __rv; +} + +__extension__ extern __inline poly16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_p16 (poly16x8_t __a, poly16x8_t __b) +{ + poly16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 5, 7, 1, 3, 13, 15, 9, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 4, 6, 0, 2, 12, 14, 8, 10 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 0, 2, 4, 6, 8, 10, 12, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 1, 3, 5, 7, 9, 11, 13, 15 }); +#endif + return __rv; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p64 (const poly64_t * __a) +{ + return (poly64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s8 (const int8_t * __a) +{ + return (int8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s16 (const int16_t * __a) +{ + return (int16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s32 (const int32_t * __a) +{ + return (int32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a); +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s64 (const int64_t * __a) +{ + return (int64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f16 (const float16_t * __a) +{ + return __builtin_neon_vld1v4hf (__a); +} +#endif + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f32 (const float32_t * __a) +{ + return (float32x2_t)__builtin_neon_vld1v2sf ((const __builtin_neon_sf *) __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u8 (const uint8_t * __a) +{ + return (uint8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u16 (const uint16_t * __a) +{ + return (uint16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u32 (const uint32_t * __a) +{ + return (uint32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u64 (const uint64_t * __a) +{ + return (uint64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p8 (const poly8_t * __a) +{ + return (poly8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p16 (const poly16_t * __a) +{ + return (poly16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p64 (const poly64_t * __a) +{ + return (poly64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s8 (const int8_t * __a) +{ + return (int8x16_t)__builtin_neon_vld1v16qi ((const 
__builtin_neon_qi *) __a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s16 (const int16_t * __a) +{ + return (int16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s32 (const int32_t * __a) +{ + return (int32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a); +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s64 (const int64_t * __a) +{ + return (int64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f16 (const float16_t * __a) +{ + return __builtin_neon_vld1v8hf (__a); +} +#endif + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f32 (const float32_t * __a) +{ + return (float32x4_t)__builtin_neon_vld1v4sf ((const __builtin_neon_sf *) __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u8 (const uint8_t * __a) +{ + return (uint8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u16 (const uint16_t * __a) +{ + return (uint16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u32 (const uint32_t * __a) +{ + return (uint32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u64 (const uint64_t * __a) +{ + return (uint64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p8 (const poly8_t * __a) +{ + return (poly8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p16 (const poly16_t * __a) +{ + return (poly16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c) +{ + return (int8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, __b, __c); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_s16 (const int16_t * __a, int16x4_t __b, const int __c) +{ + return (int16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, __b, __c); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_s32 (const int32_t * __a, int32x2_t __b, const int __c) +{ + return (int32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, __b, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) 
+__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_f16 (const float16_t * __a, float16x4_t __b, const int __c) +{ + return vset_lane_f16 (*__a, __b, __c); +} +#endif + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_f32 (const float32_t * __a, float32x2_t __b, const int __c) +{ + return (float32x2_t)__builtin_neon_vld1_lanev2sf ((const __builtin_neon_sf *) __a, __b, __c); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_u8 (const uint8_t * __a, uint8x8_t __b, const int __c) +{ + return (uint8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_u16 (const uint16_t * __a, uint16x4_t __b, const int __c) +{ + return (uint16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_u32 (const uint32_t * __a, uint32x2_t __b, const int __c) +{ + return (uint32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_p8 (const poly8_t * __a, poly8x8_t __b, const int __c) +{ + return (poly8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_p16 (const poly16_t * __a, poly16x4_t __b, const int __c) +{ + return (poly16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_p64 (const poly64_t * __a, poly64x1_t __b, const int __c) +{ + return (poly64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_s64 (const int64_t * __a, int64x1_t __b, const int __c) +{ + return (int64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_lane_u64 (const uint64_t * __a, uint64x1_t __b, const int __c) +{ + return (uint64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, (int64x1_t) __b, __c); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_s8 (const int8_t * __a, int8x16_t __b, const int __c) +{ + return (int8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, __b, __c); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_s16 (const int16_t * __a, int16x8_t __b, const int __c) +{ + return (int16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, __b, __c); +} + +__extension__ extern __inline int32x4_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_s32 (const int32_t * __a, int32x4_t __b, const int __c) +{ + return (int32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, __b, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_f16 (const float16_t * __a, float16x8_t __b, const int __c) +{ + return vsetq_lane_f16 (*__a, __b, __c); +} +#endif + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_f32 (const float32_t * __a, float32x4_t __b, const int __c) +{ + return (float32x4_t)__builtin_neon_vld1_lanev4sf ((const __builtin_neon_sf *) __a, __b, __c); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_u8 (const uint8_t * __a, uint8x16_t __b, const int __c) +{ + return (uint8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_u16 (const uint16_t * __a, uint16x8_t __b, const int __c) +{ + return (uint16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_u32 (const uint32_t * __a, uint32x4_t __b, const int __c) +{ + return (uint32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, (int32x4_t) __b, __c); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_p8 (const poly8_t * __a, poly8x16_t __b, const int __c) +{ + return (poly8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_p16 (const poly16_t * __a, poly16x8_t __b, const int __c) +{ + return (poly16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_p64 (const poly64_t * __a, poly64x2_t __b, const int __c) +{ + return (poly64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_s64 (const int64_t * __a, int64x2_t __b, const int __c) +{ + return (int64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, __b, __c); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_lane_u64 (const uint64_t * __a, uint64x2_t __b, const int __c) +{ + return (uint64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_s8 (const int8_t * __a) +{ + return (int8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern 
__inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_s16 (const int16_t * __a) +{ + return (int16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_s32 (const int32_t * __a) +{ + return (int32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_f16 (const float16_t * __a) +{ + float16_t __f = *__a; + return (float16x4_t) { __f, __f, __f, __f }; +} +#endif + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_f32 (const float32_t * __a) +{ + return (float32x2_t)__builtin_neon_vld1_dupv2sf ((const __builtin_neon_sf *) __a); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_u8 (const uint8_t * __a) +{ + return (uint8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_u16 (const uint16_t * __a) +{ + return (uint16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_u32 (const uint32_t * __a) +{ + return (uint32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a); +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_p8 (const poly8_t * __a) +{ + return (poly8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_p16 (const poly16_t * __a) +{ + return (poly16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_p64 (const poly64_t * __a) +{ + return (poly64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_s64 (const int64_t * __a) +{ + return (int64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a); +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_dup_u64 (const uint64_t * __a) +{ + return (uint64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a); +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_s8 (const int8_t * __a) +{ + return (int8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_s16 (const int16_t * __a) +{ + return (int16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline int32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_s32 (const int32_t * __a) +{ + return (int32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_f16 (const float16_t * __a) +{ + float16_t __f = *__a; + return (float16x8_t) { __f, __f, __f, __f, __f, __f, __f, __f }; +} +#endif + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_f32 (const float32_t * __a) +{ + return (float32x4_t)__builtin_neon_vld1_dupv4sf ((const __builtin_neon_sf *) __a); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_u8 (const uint8_t * __a) +{ + return (uint8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_u16 (const uint16_t * __a) +{ + return (uint16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_u32 (const uint32_t * __a) +{ + return (uint32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a); +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_p8 (const poly8_t * __a) +{ + return (poly8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a); +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_p16 (const poly16_t * __a) +{ + return (poly16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_p64 (const poly64_t * __a) +{ + return (poly64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a); +} + +#pragma GCC pop_options +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_s64 (const int64_t * __a) +{ + return (int64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a); +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_dup_u64 (const uint64_t * __a) +{ + return (uint64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p64 (poly64_t * __a, poly64x1_t __b) +{ + __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b); +} + +#pragma GCC pop_options +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s8 (int8_t * __a, int8x8_t __b) +{ + __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s16 (int16_t * __a, int16x4_t __b) +{ + __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, __b); +} + +__extension__ extern 
__inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s32 (int32_t * __a, int32x2_t __b) +{ + __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s64 (int64_t * __a, int64x1_t __b) +{ + __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f16 (float16_t * __a, float16x4_t __b) +{ + __builtin_neon_vst1v4hf (__a, __b); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f32 (float32_t * __a, float32x2_t __b) +{ + __builtin_neon_vst1v2sf ((__builtin_neon_sf *) __a, __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u8 (uint8_t * __a, uint8x8_t __b) +{ + __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u16 (uint16_t * __a, uint16x4_t __b) +{ + __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u32 (uint32_t * __a, uint32x2_t __b) +{ + __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, (int32x2_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u64 (uint64_t * __a, uint64x1_t __b) +{ + __builtin_neon_vst1di ((__builtin_neon_di *) __a, (int64x1_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p8 (poly8_t * __a, poly8x8_t __b) +{ + __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p16 (poly16_t * __a, poly16x4_t __b) +{ + __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p64 (poly64_t * __a, poly64x2_t __b) +{ + __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b); +} + +#pragma GCC pop_options +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s8 (int8_t * __a, int8x16_t __b) +{ + __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s16 (int16_t * __a, int16x8_t __b) +{ + __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s32 (int32_t * __a, int32x4_t __b) +{ + __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s64 (int64_t * __a, int64x2_t __b) +{ + __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, __b); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ 
extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f16 (float16_t * __a, float16x8_t __b) +{ + __builtin_neon_vst1v8hf (__a, __b); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f32 (float32_t * __a, float32x4_t __b) +{ + __builtin_neon_vst1v4sf ((__builtin_neon_sf *) __a, __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u8 (uint8_t * __a, uint8x16_t __b) +{ + __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u16 (uint16_t * __a, uint16x8_t __b) +{ + __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u32 (uint32_t * __a, uint32x4_t __b) +{ + __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, (int32x4_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u64 (uint64_t * __a, uint64x2_t __b) +{ + __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p8 (poly8_t * __a, poly8x16_t __b) +{ + __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p16 (poly16_t * __a, poly16x8_t __b) +{ + __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_s8 (int8_t * __a, int8x8_t __b, const int __c) +{ + __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_s16 (int16_t * __a, int16x4_t __b, const int __c) +{ + __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_s32 (int32_t * __a, int32x2_t __b, const int __c) +{ + __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, __b, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_f16 (float16_t * __a, float16x4_t __b, const int __c) +{ + __builtin_neon_vst1_lanev4hf (__a, __b, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_f32 (float32_t * __a, float32x2_t __b, const int __c) +{ + __builtin_neon_vst1_lanev2sf ((__builtin_neon_sf *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_u8 (uint8_t * __a, uint8x8_t __b, const int __c) +{ + __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_u16 (uint16_t * __a, uint16x4_t __b, const int __c) +{ + __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) 
__a, (int16x4_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_u32 (uint32_t * __a, uint32x2_t __b, const int __c) +{ + __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, (int32x2_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_p8 (poly8_t * __a, poly8x8_t __b, const int __c) +{ + __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_p16 (poly16_t * __a, poly16x4_t __b, const int __c) +{ + __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_p64 (poly64_t * __a, poly64x1_t __b, const int __c) +{ + __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_s64 (int64_t * __a, int64x1_t __b, const int __c) +{ + __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_lane_u64 (uint64_t * __a, uint64x1_t __b, const int __c) +{ + __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, (int64x1_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_s8 (int8_t * __a, int8x16_t __b, const int __c) +{ + __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_s16 (int16_t * __a, int16x8_t __b, const int __c) +{ + __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_s32 (int32_t * __a, int32x4_t __b, const int __c) +{ + __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, __b, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_f16 (float16_t * __a, float16x8_t __b, const int __c) +{ + __builtin_neon_vst1_lanev8hf (__a, __b, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_f32 (float32_t * __a, float32x4_t __b, const int __c) +{ + __builtin_neon_vst1_lanev4sf ((__builtin_neon_sf *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_u8 (uint8_t * __a, uint8x16_t __b, const int __c) +{ + __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_u16 (uint16_t * __a, uint16x8_t __b, const int __c) +{ + __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +vst1q_lane_u32 (uint32_t * __a, uint32x4_t __b, const int __c) +{ + __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, (int32x4_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_p8 (poly8_t * __a, poly8x16_t __b, const int __c) +{ + __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_p16 (poly16_t * __a, poly16x8_t __b, const int __c) +{ + __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_p64 (poly64_t * __a, poly64x2_t __b, const int __c) +{ + __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c); +} + +#pragma GCC pop_options +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_s64 (int64_t * __a, int64x2_t __b, const int __c) +{ + __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, __b, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_lane_u64 (uint64_t * __a, uint64x2_t __b, const int __c) +{ + __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c); +} + +__extension__ extern __inline int8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_s8 (const int8_t * __a) +{ + union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_s16 (const int16_t * __a) +{ + union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_s32 (const int32_t * __a) +{ + union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_f16 (const float16_t * __a) +{ + union { float16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v4hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_f32 (const float32_t * __a) +{ + union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v2sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_u8 (const uint8_t * __a) +{ + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_u16 (const 
uint16_t * __a) +{ + union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_u32 (const uint32_t * __a) +{ + union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_p8 (const poly8_t * __a) +{ + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_p16 (const poly16_t * __a) +{ + union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_p64 (const poly64_t * __a) +{ + union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_s64 (const int64_t * __a) +{ + union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_u64 (const uint64_t * __a) +{ + union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline int8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_s8 (const int8_t * __a) +{ + union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_s16 (const int16_t * __a) +{ + union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_s32 (const int32_t * __a) +{ + union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_f16 (const float16_t * __a) +{ + union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v8hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_f32 (const float32_t * __a) +{ + union { float32x4x2_t __i; 
__builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v4sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_u8 (const uint8_t * __a) +{ + union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_u16 (const uint16_t * __a) +{ + union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_u32 (const uint32_t * __a) +{ + union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_p8 (const poly8_t * __a) +{ + union { poly8x16x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_p16 (const poly16_t * __a) +{ + union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_s8 (const int8_t * __a, int8x8x2_t __b, const int __c) +{ + union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_s16 (const int16_t * __a, int16x4x2_t __b, const int __c) +{ + union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_s32 (const int32_t * __a, int32x2x2_t __b, const int __c) +{ + union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_f16 (const float16_t * __a, float16x4x2_t __b, const int __c) +{ + union { float16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { float16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev4hf ( __a, __bu.__o, __c); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_f32 
(const float32_t * __a, float32x2x2_t __b, const int __c) +{ + union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_u8 (const uint8_t * __a, uint8x8x2_t __b, const int __c) +{ + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_u16 (const uint16_t * __a, uint16x4x2_t __b, const int __c) +{ + union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_u32 (const uint32_t * __a, uint32x2x2_t __b, const int __c) +{ + union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_p8 (const poly8_t * __a, poly8x8x2_t __b, const int __c) +{ + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_lane_p16 (const poly16_t * __a, poly16x4x2_t __b, const int __c) +{ + union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_lane_s16 (const int16_t * __a, int16x8x2_t __b, const int __c) +{ + union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_lane_s32 (const int32_t * __a, int32x4x2_t __b, const int __c) +{ + union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_lane_f16 (const float16_t * __a, float16x8x2_t 
__b, const int __c) +{ + union { float16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { float16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev8hf (__a, __bu.__o, __c); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_lane_f32 (const float32_t * __a, float32x4x2_t __b, const int __c) +{ + union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_lane_u16 (const uint16_t * __a, uint16x8x2_t __b, const int __c) +{ + union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_lane_u32 (const uint32_t * __a, uint32x4x2_t __b, const int __c) +{ + union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2q_lane_p16 (const poly16_t * __a, poly16x8x2_t __b, const int __c) +{ + union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_s8 (const int8_t * __a) +{ + union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_s16 (const int16_t * __a) +{ + union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_s32 (const int32_t * __a) +{ + union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_f16 (const float16_t * __a) +{ + union { float16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv4hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_f32 (const float32_t * __a) +{ + union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv2sf ((const 
__builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_u8 (const uint8_t * __a) +{ + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_u16 (const uint16_t * __a) +{ + union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_u32 (const uint32_t * __a) +{ + union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_p8 (const poly8_t * __a) +{ + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_p16 (const poly16_t * __a) +{ + union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_p64 (const poly64_t * __a) +{ + union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_s64 (const int64_t * __a) +{ + union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld2_dup_u64 (const uint64_t * __a) +{ + union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv; + __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_s8 (int8_t * __a, int8x8x2_t __b) +{ + union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_s16 (int16_t * __a, int16x4x2_t __b) +{ + union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_s32 (int32_t * __a, int32x2x2_t __b) +{ + union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined 
(__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_f16 (float16_t * __a, float16x4x2_t __b) +{ + union { float16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v4hf (__a, __bu.__o); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_f32 (float32_t * __a, float32x2x2_t __b) +{ + union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v2sf ((__builtin_neon_sf *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_u8 (uint8_t * __a, uint8x8x2_t __b) +{ + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_u16 (uint16_t * __a, uint16x4x2_t __b) +{ + union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_u32 (uint32_t * __a, uint32x2x2_t __b) +{ + union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_p8 (poly8_t * __a, poly8x8x2_t __b) +{ + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_p16 (poly16_t * __a, poly16x4x2_t __b) +{ + union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_p64 (poly64_t * __a, poly64x1x2_t __b) +{ + union { poly64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o); +} + +#pragma GCC pop_options +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_s64 (int64_t * __a, int64x1x2_t __b) +{ + union { int64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_u64 (uint64_t * __a, uint64x1x2_t __b) +{ + union { uint64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_s8 (int8_t * __a, int8x16x2_t __b) +{ + union { int8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_s16 (int16_t * __a, int16x8x2_t __b) +{ + union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + 
__builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_s32 (int32_t * __a, int32x4x2_t __b) +{ + union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_f16 (float16_t * __a, float16x8x2_t __b) +{ + union { float16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v8hf (__a, __bu.__o); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_f32 (float32_t * __a, float32x4x2_t __b) +{ + union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v4sf ((__builtin_neon_sf *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_u8 (uint8_t * __a, uint8x16x2_t __b) +{ + union { uint8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_u16 (uint16_t * __a, uint16x8x2_t __b) +{ + union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_u32 (uint32_t * __a, uint32x4x2_t __b) +{ + union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_p8 (poly8_t * __a, poly8x16x2_t __b) +{ + union { poly8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_p16 (poly16_t * __a, poly16x8x2_t __b) +{ + union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_s8 (int8_t * __a, int8x8x2_t __b, const int __c) +{ + union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_s16 (int16_t * __a, int16x4x2_t __b, const int __c) +{ + union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_s32 (int32_t * __a, int32x2x2_t __b, const int __c) +{ + union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_f16 (float16_t * __a, float16x4x2_t __b, const int __c) +{ + union { float16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev4hf (__a, __bu.__o, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_f32 (float32_t * __a, float32x2x2_t __b, const int __c) +{ + union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_u8 (uint8_t * __a, uint8x8x2_t __b, const int __c) +{ + union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_u16 (uint16_t * __a, uint16x4x2_t __b, const int __c) +{ + union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_u32 (uint32_t * __a, uint32x2x2_t __b, const int __c) +{ + union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_p8 (poly8_t * __a, poly8x8x2_t __b, const int __c) +{ + union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2_lane_p16 (poly16_t * __a, poly16x4x2_t __b, const int __c) +{ + union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b }; + __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_lane_s16 (int16_t * __a, int16x8x2_t __b, const int __c) +{ + union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_lane_s32 (int32_t * __a, int32x4x2_t __b, const int __c) +{ + union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_lane_f16 (float16_t * __a, float16x8x2_t __b, const int __c) +{ + union { float16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2_lanev8hf (__a, __bu.__o, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_lane_f32 (float32_t * __a, float32x4x2_t __b, const int __c) +{ + union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, 
__c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_lane_u16 (uint16_t * __a, uint16x8x2_t __b, const int __c) +{ + union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_lane_u32 (uint32_t * __a, uint32x4x2_t __b, const int __c) +{ + union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst2q_lane_p16 (poly16_t * __a, poly16x8x2_t __b, const int __c) +{ + union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline int8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_s8 (const int8_t * __a) +{ + union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_s16 (const int16_t * __a) +{ + union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_s32 (const int32_t * __a) +{ + union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_f16 (const float16_t * __a) +{ + union { float16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v4hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_f32 (const float32_t * __a) +{ + union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v2sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_u8 (const uint8_t * __a) +{ + union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_u16 (const uint16_t * __a) +{ + union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_u32 (const uint32_t * __a) +{ + union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x3_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vld3_p8 (const poly8_t * __a) +{ + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_p16 (const poly16_t * __a) +{ + union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_p64 (const poly64_t * __a) +{ + union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_s64 (const int64_t * __a) +{ + union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint64x1x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_u64 (const uint64_t * __a) +{ + union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline int8x16x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_s8 (const int8_t * __a) +{ + union { int8x16x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_s16 (const int16_t * __a) +{ + union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_s32 (const int32_t * __a) +{ + union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_f16 (const float16_t * __a) +{ + union { float16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v8hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_f32 (const float32_t * __a) +{ + union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v4sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x16x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_u8 (const uint8_t * __a) +{ + union { uint8x16x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_u16 (const uint16_t 
* __a) +{ + union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_u32 (const uint32_t * __a) +{ + union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x16x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_p8 (const poly8_t * __a) +{ + union { poly8x16x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_p16 (const poly16_t * __a) +{ + union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_s8 (const int8_t * __a, int8x8x3_t __b, const int __c) +{ + union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_s16 (const int16_t * __a, int16x4x3_t __b, const int __c) +{ + union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_s32 (const int32_t * __a, int32x2x3_t __b, const int __c) +{ + union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_f16 (const float16_t * __a, float16x4x3_t __b, const int __c) +{ + union { float16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { float16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev4hf (__a, __bu.__o, __c); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_f32 (const float32_t * __a, float32x2x3_t __b, const int __c) +{ + union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_u8 (const uint8_t * __a, uint8x8x3_t __b, const int __c) +{ + union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { 
uint8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_u16 (const uint16_t * __a, uint16x4x3_t __b, const int __c) +{ + union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_u32 (const uint32_t * __a, uint32x2x3_t __b, const int __c) +{ + union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_p8 (const poly8_t * __a, poly8x8x3_t __b, const int __c) +{ + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_lane_p16 (const poly16_t * __a, poly16x4x3_t __b, const int __c) +{ + union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_lane_s16 (const int16_t * __a, int16x8x3_t __b, const int __c) +{ + union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int32x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_lane_s32 (const int32_t * __a, int32x4x3_t __b, const int __c) +{ + union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_lane_f16 (const float16_t * __a, float16x8x3_t __b, const int __c) +{ + union { float16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + union { float16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev8hf (__a, __bu.__o, __c); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_lane_f32 (const float32_t * __a, float32x4x3_t __b, const int __c) +{ + union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv; 
+ __rv.__o = __builtin_neon_vld3_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_lane_u16 (const uint16_t * __a, uint16x8x3_t __b, const int __c) +{ + union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint32x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_lane_u32 (const uint32_t * __a, uint32x4x3_t __b, const int __c) +{ + union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly16x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3q_lane_p16 (const poly16_t * __a, poly16x8x3_t __b, const int __c) +{ + union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv; + __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_s8 (const int8_t * __a) +{ + union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_s16 (const int16_t * __a) +{ + union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_s32 (const int32_t * __a) +{ + union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_f16 (const float16_t * __a) +{ + union { float16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv4hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_f32 (const float32_t * __a) +{ + union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv2sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_u8 (const uint8_t * __a) +{ + union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_u16 (const uint16_t * __a) +{ + union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv; + 
__rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_u32 (const uint32_t * __a) +{ + union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_p8 (const poly8_t * __a) +{ + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_p16 (const poly16_t * __a) +{ + union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_p64 (const poly64_t * __a) +{ + union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_s64 (const int64_t * __a) +{ + union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint64x1x3_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld3_dup_u64 (const uint64_t * __a) +{ + union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv; + __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_s8 (int8_t * __a, int8x8x3_t __b) +{ + union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_s16 (int16_t * __a, int16x4x3_t __b) +{ + union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_s32 (int32_t * __a, int32x2x3_t __b) +{ + union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_f16 (float16_t * __a, float16x4x3_t __b) +{ + union { float16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v4hf (__a, __bu.__o); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_f32 (float32_t * __a, float32x2x3_t __b) +{ + union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v2sf ((__builtin_neon_sf 
*) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_u8 (uint8_t * __a, uint8x8x3_t __b) +{ + union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_u16 (uint16_t * __a, uint16x4x3_t __b) +{ + union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_u32 (uint32_t * __a, uint32x2x3_t __b) +{ + union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_p8 (poly8_t * __a, poly8x8x3_t __b) +{ + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_p16 (poly16_t * __a, poly16x4x3_t __b) +{ + union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_p64 (poly64_t * __a, poly64x1x3_t __b) +{ + union { poly64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o); +} + +#pragma GCC pop_options +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_s64 (int64_t * __a, int64x1x3_t __b) +{ + union { int64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_u64 (uint64_t * __a, uint64x1x3_t __b) +{ + union { uint64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_s8 (int8_t * __a, int8x16x3_t __b) +{ + union { int8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_s16 (int16_t * __a, int16x8x3_t __b) +{ + union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_s32 (int32_t * __a, int32x4x3_t __b) +{ + union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_f16 (float16_t * __a, float16x8x3_t __b) +{ + 
union { float16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v8hf (__a, __bu.__o); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_f32 (float32_t * __a, float32x4x3_t __b) +{ + union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v4sf ((__builtin_neon_sf *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_u8 (uint8_t * __a, uint8x16x3_t __b) +{ + union { uint8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_u16 (uint16_t * __a, uint16x8x3_t __b) +{ + union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_u32 (uint32_t * __a, uint32x4x3_t __b) +{ + union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_p8 (poly8_t * __a, poly8x16x3_t __b) +{ + union { poly8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_p16 (poly16_t * __a, poly16x8x3_t __b) +{ + union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_s8 (int8_t * __a, int8x8x3_t __b, const int __c) +{ + union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_s16 (int16_t * __a, int16x4x3_t __b, const int __c) +{ + union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_s32 (int32_t * __a, int32x2x3_t __b, const int __c) +{ + union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_f16 (float16_t * __a, float16x4x3_t __b, const int __c) +{ + union { float16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev4hf (__a, __bu.__o, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_f32 (float32_t * __a, float32x2x3_t __b, const int __c) +{ + union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c); +} + 
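
(Editorial usage sketch — not part of the imported header. It shows how the de-interleaving vld3/vst3 intrinsics defined above fit together; the function name rgb_to_bgr and its arguments are invented for the example, while vld3_u8, vst3_u8, uint8x8x3_t and its .val[] member all come from this header. Assumes a NEON-enabled ARM target.)

#include <arm_neon.h>

/* Swap the R and B channels of a packed RGB24 buffer, 8 pixels per
   iteration.  Any tail shorter than 8 pixels is left for scalar code
   (omitted here).  */
void
rgb_to_bgr (uint8_t *buf, int n)
{
  for (int i = 0; i + 8 <= n; i += 8)
    {
      /* vld3_u8 de-interleaves 24 bytes into three 8-lane vectors:
         val[0] holds the R bytes, val[1] the G bytes, val[2] the B bytes.  */
      uint8x8x3_t px = vld3_u8 (buf + 3 * i);
      uint8x8_t r = px.val[0];
      px.val[0] = px.val[2];
      px.val[2] = r;
      /* vst3_u8 re-interleaves the three vectors back into memory.  */
      vst3_u8 (buf + 3 * i, px);
    }
}

(The _lane variants that follow, such as vst3_lane_u8, store a single interleaved triple — lane __c of each of the three vectors — rather than a full 8-pixel block, which is the usual tool for handling such tails.)
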
+__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_u8 (uint8_t * __a, uint8x8x3_t __b, const int __c) +{ + union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_u16 (uint16_t * __a, uint16x4x3_t __b, const int __c) +{ + union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_u32 (uint32_t * __a, uint32x2x3_t __b, const int __c) +{ + union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_p8 (poly8_t * __a, poly8x8x3_t __b, const int __c) +{ + union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3_lane_p16 (poly16_t * __a, poly16x4x3_t __b, const int __c) +{ + union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b }; + __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_lane_s16 (int16_t * __a, int16x8x3_t __b, const int __c) +{ + union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_lane_s32 (int32_t * __a, int32x4x3_t __b, const int __c) +{ + union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_lane_f16 (float16_t * __a, float16x8x3_t __b, const int __c) +{ + union { float16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3_lanev8hf (__a, __bu.__o, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_lane_f32 (float32_t * __a, float32x4x3_t __b, const int __c) +{ + union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_lane_u16 (uint16_t * __a, uint16x8x3_t __b, const int __c) +{ + union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_lane_u32 (uint32_t * __a, uint32x4x3_t __b, const int __c) +{ + union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + 
__builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst3q_lane_p16 (poly16_t * __a, poly16x8x3_t __b, const int __c) +{ + union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b }; + __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline int8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_s8 (const int8_t * __a) +{ + union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_s16 (const int16_t * __a) +{ + union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_s32 (const int32_t * __a) +{ + union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_f16 (const float16_t * __a) +{ + union { float16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v4hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_f32 (const float32_t * __a) +{ + union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v2sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_u8 (const uint8_t * __a) +{ + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_u16 (const uint16_t * __a) +{ + union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_u32 (const uint32_t * __a) +{ + union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_p8 (const poly8_t * __a) +{ + union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_p16 (const poly16_t * __a) +{ + union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +#pragma GCC push_options +#pragma GCC target 
("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_p64 (const poly64_t * __a) +{ + union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_s64 (const int64_t * __a) +{ + union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint64x1x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_u64 (const uint64_t * __a) +{ + union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline int8x16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_s8 (const int8_t * __a) +{ + union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_s16 (const int16_t * __a) +{ + union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_s32 (const int32_t * __a) +{ + union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_f16 (const float16_t * __a) +{ + union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v8hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_f32 (const float32_t * __a) +{ + union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v4sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_u8 (const uint8_t * __a) +{ + union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_u16 (const uint16_t * __a) +{ + union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_u32 (const uint32_t * __a) +{ + union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +vld4q_p8 (const poly8_t * __a) +{ + union { poly8x16x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_p16 (const poly16_t * __a) +{ + union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_s8 (const int8_t * __a, int8x8x4_t __b, const int __c) +{ + union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_s16 (const int16_t * __a, int16x4x4_t __b, const int __c) +{ + union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_s32 (const int32_t * __a, int32x2x4_t __b, const int __c) +{ + union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_f16 (const float16_t * __a, float16x4x4_t __b, const int __c) +{ + union { float16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { float16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev4hf (__a, + __bu.__o, __c); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_f32 (const float32_t * __a, float32x2x4_t __b, const int __c) +{ + union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_u8 (const uint8_t * __a, uint8x8x4_t __b, const int __c) +{ + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_u16 (const uint16_t * __a, uint16x4x4_t __b, const int __c) +{ + union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + 
return __rv.__i; +} + +__extension__ extern __inline uint32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_u32 (const uint32_t * __a, uint32x2x4_t __b, const int __c) +{ + union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_p8 (const poly8_t * __a, poly8x8x4_t __b, const int __c) +{ + union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_lane_p16 (const poly16_t * __a, poly16x4x4_t __b, const int __c) +{ + union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_lane_s16 (const int16_t * __a, int16x8x4_t __b, const int __c) +{ + union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_lane_s32 (const int32_t * __a, int32x4x4_t __b, const int __c) +{ + union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_lane_f16 (const float16_t * __a, float16x8x4_t __b, const int __c) +{ + union { float16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + union { float16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev8hf (__a, + __bu.__o, __c); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_lane_f32 (const float32_t * __a, float32x4x4_t __b, const int __c) +{ + union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline uint16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_lane_u16 (const uint16_t * __a, uint16x8x4_t __b, const int __c) +{ + union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern 
__inline uint32x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_lane_u32 (const uint32_t * __a, uint32x4x4_t __b, const int __c) +{ + union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline poly16x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4q_lane_p16 (const poly16_t * __a, poly16x8x4_t __b, const int __c) +{ + union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv; + __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c); + return __rv.__i; +} + +__extension__ extern __inline int8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_s8 (const int8_t * __a) +{ + union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_s16 (const int16_t * __a) +{ + union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline int32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_s32 (const int32_t * __a) +{ + union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_f16 (const float16_t * __a) +{ + union { float16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv4hf (__a); + return __rv.__i; +} +#endif + +__extension__ extern __inline float32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_f32 (const float32_t * __a) +{ + union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv2sf ((const __builtin_neon_sf *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_u8 (const uint8_t * __a) +{ + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_u16 (const uint16_t * __a) +{ + union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint32x2x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_u32 (const uint32_t * __a) +{ + union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly8x8x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_p8 (const poly8_t * __a) +{ + union { 
poly8x8x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a); + return __rv.__i; +} + +__extension__ extern __inline poly16x4x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_p16 (const poly16_t * __a) +{ + union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a); + return __rv.__i; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_p64 (const poly64_t * __a) +{ + union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_s64 (const int64_t * __a) +{ + union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline uint64x1x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld4_dup_u64 (const uint64_t * __a) +{ + union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv; + __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a); + return __rv.__i; +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_s8 (int8_t * __a, int8x8x4_t __b) +{ + union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_s16 (int16_t * __a, int16x4x4_t __b) +{ + union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_s32 (int32_t * __a, int32x2x4_t __b) +{ + union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_f16 (float16_t * __a, float16x4x4_t __b) +{ + union { float16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v4hf (__a, __bu.__o); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_f32 (float32_t * __a, float32x2x4_t __b) +{ + union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v2sf ((__builtin_neon_sf *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_u8 (uint8_t * __a, uint8x8x4_t __b) +{ + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_u16 (uint16_t * __a, uint16x4x4_t __b) +{ + union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v4hi 
((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_u32 (uint32_t * __a, uint32x2x4_t __b) +{ + union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_p8 (poly8_t * __a, poly8x8x4_t __b) +{ + union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_p16 (poly16_t * __a, poly16x4x4_t __b) +{ + union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_p64 (poly64_t * __a, poly64x1x4_t __b) +{ + union { poly64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o); +} + +#pragma GCC pop_options +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_s64 (int64_t * __a, int64x1x4_t __b) +{ + union { int64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_u64 (uint64_t * __a, uint64x1x4_t __b) +{ + union { uint64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_s8 (int8_t * __a, int8x16x4_t __b) +{ + union { int8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_s16 (int16_t * __a, int16x8x4_t __b) +{ + union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_s32 (int32_t * __a, int32x4x4_t __b) +{ + union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_f16 (float16_t * __a, float16x8x4_t __b) +{ + union { float16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v8hf (__a, __bu.__o); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_f32 (float32_t * __a, float32x4x4_t __b) +{ + union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v4sf ((__builtin_neon_sf *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_u8 (uint8_t * __a, uint8x16x4_t 
__b) +{ + union { uint8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_u16 (uint16_t * __a, uint16x8x4_t __b) +{ + union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_u32 (uint32_t * __a, uint32x4x4_t __b) +{ + union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_p8 (poly8_t * __a, poly8x16x4_t __b) +{ + union { poly8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_p16 (poly16_t * __a, poly16x8x4_t __b) +{ + union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_s8 (int8_t * __a, int8x8x4_t __b, const int __c) +{ + union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_s16 (int16_t * __a, int16x4x4_t __b, const int __c) +{ + union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_s32 (int32_t * __a, int32x2x4_t __b, const int __c) +{ + union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_f16 (float16_t * __a, float16x4x4_t __b, const int __c) +{ + union { float16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev4hf (__a, __bu.__o, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_f32 (float32_t * __a, float32x2x4_t __b, const int __c) +{ + union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_u8 (uint8_t * __a, uint8x8x4_t __b, const int __c) +{ + union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_u16 (uint16_t * __a, uint16x4x4_t __b, const int __c) +{ + union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + 
__builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_u32 (uint32_t * __a, uint32x2x4_t __b, const int __c) +{ + union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_p8 (poly8_t * __a, poly8x8x4_t __b, const int __c) +{ + union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4_lane_p16 (poly16_t * __a, poly16x4x4_t __b, const int __c) +{ + union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_lane_s16 (int16_t * __a, int16x8x4_t __b, const int __c) +{ + union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_lane_s32 (int32_t * __a, int32x4x4_t __b, const int __c) +{ + union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_lane_f16 (float16_t * __a, float16x8x4_t __b, const int __c) +{ + union { float16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev8hf (__a, __bu.__o, __c); +} +#endif + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_lane_f32 (float32_t * __a, float32x4x4_t __b, const int __c) +{ + union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_lane_u16 (uint16_t * __a, uint16x8x4_t __b, const int __c) +{ + union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_lane_u32 (uint32_t * __a, uint32x4x4_t __b, const int __c) +{ + union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst4q_lane_p16 (poly16_t * __a, poly16x8x4_t __b, const int __c) +{ + union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b }; + __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c); +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a & __b; +} + 
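+/* The vand/vorr/veor/vbic/vorn families in this part of the header map
+   directly onto GCC's generic vector operators: vand -> __a & __b,
+   vorr -> __a | __b, veor -> __a ^ __b, vbic -> __a & ~__b (bit clear),
+   and vorn -> __a | ~__b (OR-NOT).  A variant exists per element type
+   only so operands are type-checked; the bit pattern produced is the
+   same for every lane width.
+
+   Illustrative sketch only (the helper below is hypothetical and not
+   part of this header):
+
+     // Clear the low nibble of every byte in a uint8x8_t.
+     static inline uint8x8_t
+     clear_low_nibbles (uint8x8_t __v)
+     {
+       return vbic_u8 (__v, vdup_n_u8 (0x0f));   // __v & ~0x0f per lane
+     }
+*/
+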
+__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_s64 (int64x1_t __a, int64x1_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vand_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_s64 (int64x2_t __a, int64x2_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vandq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return __a & __b; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_u16 (uint16x4_t 
__a, uint16x4_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_s64 (int64x1_t __a, int64x1_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorr_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_s64 (int64x2_t __a, int64x2_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorrq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return __a | __b; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_s64 (int64x1_t __a, int64x1_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veor_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +veorq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veorq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veorq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veorq_s64 (int64x2_t __a, int64x2_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veorq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veorq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veorq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +veorq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return __a ^ __b; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_s64 (int64x1_t __a, int64x1_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbic_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_s64 (int64x2_t __a, int64x2_t __b) +{ + return __a & ~__b; +} + +__extension__ 
extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbicq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return __a & ~__b; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_s8 (int8x8_t __a, int8x8_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_s16 (int16x4_t __a, int16x4_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_s32 (int32x2_t __a, int32x2_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_u8 (uint8x8_t __a, uint8x8_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_u16 (uint16x4_t __a, uint16x4_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_u32 (uint32x2_t __a, uint32x2_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_s64 (int64x1_t __a, int64x1_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vorn_u64 (uint64x1_t __a, uint64x1_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vornq_s8 (int8x16_t __a, int8x16_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vornq_s16 (int16x8_t __a, int16x8_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vornq_s32 (int32x4_t __a, int32x4_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vornq_s64 (int64x2_t __a, int64x2_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vornq_u8 (uint8x16_t __a, uint8x16_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vornq_u16 (uint16x8_t __a, uint16x8_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vornq_u32 (uint32x4_t __a, uint32x4_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vornq_u64 (uint64x2_t __a, uint64x2_t __b) +{ + return __a | ~__b; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_p16 (poly16x4_t __a) +{ + return (poly8x8_t) __a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_f16 (float16x4_t __a) +{ + return (poly8x8_t) __a; +} +#endif + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_f32 (float32x2_t __a) +{ + return (poly8x8_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_p64 (poly64x1_t __a) +{ + return (poly8x8_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_s64 (int64x1_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_u64 (uint64x1_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_s8 (int8x8_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_s16 (int16x4_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_s32 (int32x2_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_u8 (uint8x8_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_u16 (uint16x4_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p8_u32 (uint32x2_t __a) +{ + return (poly8x8_t)__a; +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_p8 (poly8x8_t __a) +{ + return (poly16x4_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_f16 (float16x4_t __a) +{ + return (poly16x4_t) __a; +} +#endif + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_f32 (float32x2_t __a) +{ + return (poly16x4_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_p64 (poly64x1_t __a) +{ + return (poly16x4_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_s64 (int64x1_t __a) +{ + return (poly16x4_t)__a; +} + +__extension__ 
extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_u64 (uint64x1_t __a) +{ + return (poly16x4_t)__a; +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_s8 (int8x8_t __a) +{ + return (poly16x4_t)__a; +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_s16 (int16x4_t __a) +{ + return (poly16x4_t)__a; +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_s32 (int32x2_t __a) +{ + return (poly16x4_t)__a; +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_u8 (uint8x8_t __a) +{ + return (poly16x4_t)__a; +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_u16 (uint16x4_t __a) +{ + return (poly16x4_t)__a; +} + +__extension__ extern __inline poly16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p16_u32 (uint32x2_t __a) +{ + return (poly16x4_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_p8 (poly8x8_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_p16 (poly16x4_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_f32 (float32x2_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_p64 (poly64x1_t __a) +{ + return (float16x4_t) __a; +} +#pragma GCC pop_options +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_s64 (int64x1_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_u64 (uint64x1_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_s8 (int8x8_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_s16 (int16x4_t __a) +{ + return (float16x4_t) __a; 
+} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_s32 (int32x2_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_u8 (uint8x8_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_u16 (uint16x4_t __a) +{ + return (float16x4_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f16_u32 (uint32x2_t __a) +{ + return (float16x4_t) __a; +} +#endif + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_p8 (poly8x8_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_p16 (poly16x4_t __a) +{ + return (float32x2_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_f16 (float16x4_t __a) +{ + return (float32x2_t) __a; +} +#endif + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_p64 (poly64x1_t __a) +{ + return (float32x2_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_s64 (int64x1_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_u64 (uint64x1_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_s8 (int8x8_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_s16 (int16x4_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_s32 (int32x2_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_u8 (uint8x8_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_u16 (uint16x4_t __a) +{ + return (float32x2_t)__a; +} + +__extension__ extern __inline float32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_f32_u32 (uint32x2_t __a) +{ + return (float32x2_t)__a; +} + +#pragma GCC push_options 
+#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_p8 (poly8x8_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_p16 (poly16x4_t __a) +{ + return (poly64x1_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_f16 (float16x4_t __a) +{ + return (poly64x1_t) __a; +} +#endif + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_f32 (float32x2_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_s64 (int64x1_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_u64 (uint64x1_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_s8 (int8x8_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_s16 (int16x4_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_s32 (int32x2_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_u8 (uint8x8_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_u16 (uint16x4_t __a) +{ + return (poly64x1_t)__a; +} + +__extension__ extern __inline poly64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_p64_u32 (uint32x2_t __a) +{ + return (poly64x1_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_p8 (poly8x8_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_p16 (poly16x4_t __a) +{ + return (int64x1_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_f16 (float16x4_t __a) +{ + return (int64x1_t) __a; +} +#endif + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_f32 (float32x2_t __a) +{ + return (int64x1_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_p64 (poly64x1_t __a) +{ + return (int64x1_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vreinterpret_s64_u64 (uint64x1_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_s8 (int8x8_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_s16 (int16x4_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_s32 (int32x2_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_u8 (uint8x8_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_u16 (uint16x4_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline int64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s64_u32 (uint32x2_t __a) +{ + return (int64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_p8 (poly8x8_t __a) +{ + return (uint64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_p16 (poly16x4_t __a) +{ + return (uint64x1_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_f16 (float16x4_t __a) +{ + return (uint64x1_t) __a; +} +#endif + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_f32 (float32x2_t __a) +{ + return (uint64x1_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_p64 (poly64x1_t __a) +{ + return (uint64x1_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_s64 (int64x1_t __a) +{ + return (uint64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_s8 (int8x8_t __a) +{ + return (uint64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_s16 (int16x4_t __a) +{ + return (uint64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_s32 (int32x2_t __a) +{ + return (uint64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_u8 (uint8x8_t __a) +{ + return (uint64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_u16 (uint16x4_t __a) +{ + return (uint64x1_t)__a; +} + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u64_u32 (uint32x2_t __a) +{ + 
return (uint64x1_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_p8 (poly8x8_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_p16 (poly16x4_t __a) +{ + return (int8x8_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_f16 (float16x4_t __a) +{ + return (int8x8_t) __a; +} +#endif + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_f32 (float32x2_t __a) +{ + return (int8x8_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_p64 (poly64x1_t __a) +{ + return (int8x8_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_s64 (int64x1_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_u64 (uint64x1_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_s16 (int16x4_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_s32 (int32x2_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_u8 (uint8x8_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_u16 (uint16x4_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s8_u32 (uint32x2_t __a) +{ + return (int8x8_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_p8 (poly8x8_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_p16 (poly16x4_t __a) +{ + return (int16x4_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_f16 (float16x4_t __a) +{ + return (int16x4_t) __a; +} +#endif + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_f32 (float32x2_t __a) +{ + return (int16x4_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_p64 (poly64x1_t __a) +{ + return (int16x4_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, 
__artificial__)) +vreinterpret_s16_s64 (int64x1_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_u64 (uint64x1_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_s8 (int8x8_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_s32 (int32x2_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_u8 (uint8x8_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_u16 (uint16x4_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s16_u32 (uint32x2_t __a) +{ + return (int16x4_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_p8 (poly8x8_t __a) +{ + return (int32x2_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_p16 (poly16x4_t __a) +{ + return (int32x2_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_f16 (float16x4_t __a) +{ + return (int32x2_t) __a; +} +#endif + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_f32 (float32x2_t __a) +{ + return (int32x2_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_p64 (poly64x1_t __a) +{ + return (int32x2_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_s64 (int64x1_t __a) +{ + return (int32x2_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_u64 (uint64x1_t __a) +{ + return (int32x2_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_s8 (int8x8_t __a) +{ + return (int32x2_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_s16 (int16x4_t __a) +{ + return (int32x2_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_u8 (uint8x8_t __a) +{ + return (int32x2_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_u16 (uint16x4_t __a) +{ + return (int32x2_t)__a; +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_s32_u32 (uint32x2_t __a) +{ + return (int32x2_t)__a; +} + 
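+/* The vreinterpret_* families reinterpret the 64-bit register contents
+   (the vreinterpretq_* forms do the same for 128-bit registers) as a
+   different element type.  No instruction is emitted and no lanes are
+   converted; only the static type changes, exactly like the C vector
+   cast each body performs.  The poly64 variants sit inside
+   "fpu=crypto-neon-fp-armv8" push_options/pop_options blocks because
+   poly64x1_t is only available with the crypto extensions.
+
+   Illustrative sketch only (the helper below is hypothetical and not
+   part of this header):
+
+     // Inspect the raw bit pattern of a float32x2_t.
+     static inline uint32x2_t
+     float_bits (float32x2_t __v)
+     {
+       return vreinterpret_u32_f32 (__v);   // same bits, new element type
+     }
+*/
+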
+__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_p8 (poly8x8_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_p16 (poly16x4_t __a) +{ + return (uint8x8_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_f16 (float16x4_t __a) +{ + return (uint8x8_t) __a; +} +#endif + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_f32 (float32x2_t __a) +{ + return (uint8x8_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_p64 (poly64x1_t __a) +{ + return (uint8x8_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_s64 (int64x1_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_u64 (uint64x1_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_s8 (int8x8_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_s16 (int16x4_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_s32 (int32x2_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_u16 (uint16x4_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u8_u32 (uint32x2_t __a) +{ + return (uint8x8_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_p8 (poly8x8_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_p16 (poly16x4_t __a) +{ + return (uint16x4_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_f16 (float16x4_t __a) +{ + return (uint16x4_t) __a; +} +#endif + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_f32 (float32x2_t __a) +{ + return (uint16x4_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_p64 (poly64x1_t __a) +{ + return (uint16x4_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vreinterpret_u16_s64 (int64x1_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_u64 (uint64x1_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_s8 (int8x8_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_s16 (int16x4_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_s32 (int32x2_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_u8 (uint8x8_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u16_u32 (uint32x2_t __a) +{ + return (uint16x4_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_p8 (poly8x8_t __a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_p16 (poly16x4_t __a) +{ + return (uint32x2_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_f16 (float16x4_t __a) +{ + return (uint32x2_t) __a; +} +#endif + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_f32 (float32x2_t __a) +{ + return (uint32x2_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_p64 (poly64x1_t __a) +{ + return (uint32x2_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_s64 (int64x1_t __a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_u64 (uint64x1_t __a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_s8 (int8x8_t __a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_s16 (int16x4_t __a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_s32 (int32x2_t __a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_u8 (uint8x8_t __a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpret_u32_u16 (uint16x4_t 
__a) +{ + return (uint32x2_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_p16 (poly16x8_t __a) +{ + return (poly8x16_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_f16 (float16x8_t __a) +{ + return (poly8x16_t) __a; +} +#endif + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_f32 (float32x4_t __a) +{ + return (poly8x16_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_p64 (poly64x2_t __a) +{ + return (poly8x16_t)__a; +} + + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_p128 (poly128_t __a) +{ + return (poly8x16_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_s64 (int64x2_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_u64 (uint64x2_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_s8 (int8x16_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_s16 (int16x8_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_s32 (int32x4_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_u8 (uint8x16_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_u16 (uint16x8_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p8_u32 (uint32x4_t __a) +{ + return (poly8x16_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_p8 (poly8x16_t __a) +{ + return (poly16x8_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_f16 (float16x8_t __a) +{ + return (poly16x8_t) __a; +} +#endif + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_f32 (float32x4_t __a) +{ + return (poly16x8_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_p64 (poly64x2_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern 
__inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_p128 (poly128_t __a) +{ + return (poly16x8_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_s64 (int64x2_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_u64 (uint64x2_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_s8 (int8x16_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_s16 (int16x8_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_s32 (int32x4_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_u8 (uint8x16_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_u16 (uint16x8_t __a) +{ + return (poly16x8_t)__a; +} + +__extension__ extern __inline poly16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p16_u32 (uint32x4_t __a) +{ + return (poly16x8_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_p8 (poly8x16_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_p16 (poly16x8_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_f32 (float32x4_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_p64 (poly64x2_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_p128 (poly128_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#pragma GCC pop_options + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_s64 (int64x2_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_u64 (uint64x2_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_s8 (int8x16_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_s16 (int16x8_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_s32 (int32x4_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_u8 (uint8x16_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_u16 (uint16x8_t __a) +{ + return (float16x8_t) __a; +} +#endif + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f16_u32 (uint32x4_t __a) +{ + return (float16x8_t) __a; +} +#endif + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_p8 (poly8x16_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_p16 (poly16x8_t __a) +{ + return (float32x4_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_f16 (float16x8_t __a) +{ + return (float32x4_t) __a; +} +#endif + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_p64 (poly64x2_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_p128 (poly128_t __a) +{ + return (float32x4_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_s64 (int64x2_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_u64 (uint64x2_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_s8 (int8x16_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_s16 (int16x8_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_s32 (int32x4_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_u8 (uint8x16_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_u16 (uint16x8_t __a) +{ + return (float32x4_t)__a; +} + +__extension__ extern __inline float32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_f32_u32 (uint32x4_t __a) +{ + return (float32x4_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_p8 (poly8x16_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_p16 (poly16x8_t __a) +{ + return (poly64x2_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_f16 (float16x8_t __a) +{ + return (poly64x2_t) __a; +} +#endif + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_f32 (float32x4_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_p128 (poly128_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_s64 (int64x2_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_u64 (uint64x2_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_s8 (int8x16_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_s16 (int16x8_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_s32 (int32x4_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_u8 (uint8x16_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_u16 (uint16x8_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p64_u32 (uint32x4_t __a) +{ + return (poly64x2_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vreinterpretq_p128_p8 (poly8x16_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_p16 (poly16x8_t __a) +{ + return (poly128_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_f16 (float16x8_t __a) +{ + return (poly128_t) __a; +} +#endif + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_f32 (float32x4_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_p64 (poly64x2_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_s64 (int64x2_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_u64 (uint64x2_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_s8 (int8x16_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_s16 (int16x8_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_s32 (int32x4_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_u8 (uint8x16_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_u16 (uint16x8_t __a) +{ + return (poly128_t)__a; +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_p128_u32 (uint32x4_t __a) +{ + return (poly128_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_p8 (poly8x16_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_p16 (poly16x8_t __a) +{ + return (int64x2_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_f16 (float16x8_t __a) +{ + return (int64x2_t) __a; +} +#endif + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_f32 (float32x4_t __a) +{ + return (int64x2_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_p64 (poly64x2_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vreinterpretq_s64_p128 (poly128_t __a) +{ + return (int64x2_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_u64 (uint64x2_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_s8 (int8x16_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_s16 (int16x8_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_s32 (int32x4_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_u8 (uint8x16_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_u16 (uint16x8_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline int64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s64_u32 (uint32x4_t __a) +{ + return (int64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_p8 (poly8x16_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_p16 (poly16x8_t __a) +{ + return (uint64x2_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_f16 (float16x8_t __a) +{ + return (uint64x2_t) __a; +} +#endif + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_f32 (float32x4_t __a) +{ + return (uint64x2_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_p64 (poly64x2_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_p128 (poly128_t __a) +{ + return (uint64x2_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_s64 (int64x2_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_s8 (int8x16_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_s16 (int16x8_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_s32 (int32x4_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vreinterpretq_u64_u8 (uint8x16_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_u16 (uint16x8_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline uint64x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u64_u32 (uint32x4_t __a) +{ + return (uint64x2_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_p8 (poly8x16_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_p16 (poly16x8_t __a) +{ + return (int8x16_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_f16 (float16x8_t __a) +{ + return (int8x16_t) __a; +} +#endif + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_f32 (float32x4_t __a) +{ + return (int8x16_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_p64 (poly64x2_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_p128 (poly128_t __a) +{ + return (int8x16_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_s64 (int64x2_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_u64 (uint64x2_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_s16 (int16x8_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_s32 (int32x4_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_u8 (uint8x16_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_u16 (uint16x8_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s8_u32 (uint32x4_t __a) +{ + return (int8x16_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_p8 (poly8x16_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_p16 (poly16x8_t __a) +{ + return (int16x8_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vreinterpretq_s16_f16 (float16x8_t __a) +{ + return (int16x8_t) __a; +} +#endif + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_f32 (float32x4_t __a) +{ + return (int16x8_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_p64 (poly64x2_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_p128 (poly128_t __a) +{ + return (int16x8_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_s64 (int64x2_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_u64 (uint64x2_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_s8 (int8x16_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_s32 (int32x4_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_u8 (uint8x16_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_u16 (uint16x8_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s16_u32 (uint32x4_t __a) +{ + return (int16x8_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_p8 (poly8x16_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_p16 (poly16x8_t __a) +{ + return (int32x4_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_f16 (float16x8_t __a) +{ + return (int32x4_t)__a; +} +#endif + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_f32 (float32x4_t __a) +{ + return (int32x4_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_p64 (poly64x2_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_p128 (poly128_t __a) +{ + return (int32x4_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_s64 (int64x2_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_u64 (uint64x2_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_s8 (int8x16_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_s16 (int16x8_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_u8 (uint8x16_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_u16 (uint16x8_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline int32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_s32_u32 (uint32x4_t __a) +{ + return (int32x4_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_p8 (poly8x16_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_p16 (poly16x8_t __a) +{ + return (uint8x16_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_f16 (float16x8_t __a) +{ + return (uint8x16_t) __a; +} +#endif + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_f32 (float32x4_t __a) +{ + return (uint8x16_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_p64 (poly64x2_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_p128 (poly128_t __a) +{ + return (uint8x16_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_s64 (int64x2_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_u64 (uint64x2_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_s8 (int8x16_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_s16 (int16x8_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_s32 (int32x4_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u8_u16 (uint16x8_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vreinterpretq_u8_u32 (uint32x4_t __a) +{ + return (uint8x16_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_p8 (poly8x16_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_p16 (poly16x8_t __a) +{ + return (uint16x8_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_f16 (float16x8_t __a) +{ + return (uint16x8_t) __a; +} +#endif + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_f32 (float32x4_t __a) +{ + return (uint16x8_t)__a; +} + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_p64 (poly64x2_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_p128 (poly128_t __a) +{ + return (uint16x8_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_s64 (int64x2_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_u64 (uint64x2_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_s8 (int8x16_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_s16 (int16x8_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_s32 (int32x4_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_u8 (uint8x16_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u16_u32 (uint32x4_t __a) +{ + return (uint16x8_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_p8 (poly8x16_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_p16 (poly16x8_t __a) +{ + return (uint32x4_t)__a; +} + +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_f16 (float16x8_t __a) +{ + return (uint32x4_t) __a; +} +#endif + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_f32 (float32x4_t __a) +{ + return (uint32x4_t)__a; +} + +#pragma GCC push_options +#pragma GCC target 
("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_p64 (poly64x2_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_p128 (poly128_t __a) +{ + return (uint32x4_t)__a; +} + +#pragma GCC pop_options +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_s64 (int64x2_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_u64 (uint64x2_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_s8 (int8x16_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_s16 (int16x8_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_s32 (int32x4_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_u8 (uint8x16_t __a) +{ + return (uint32x4_t)__a; +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vreinterpretq_u32_u16 (uint16x8_t __a) +{ + return (uint32x4_t)__a; +} + + +#pragma GCC push_options +#pragma GCC target ("fpu=crypto-neon-fp-armv8") +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vldrq_p128 (poly128_t const * __ptr) +{ +#ifdef __ARM_BIG_ENDIAN + poly64_t* __ptmp = (poly64_t*) __ptr; + poly64_t __d0 = vld1_p64 (__ptmp); + poly64_t __d1 = vld1_p64 (__ptmp + 1); + return vreinterpretq_p128_p64 (vcombine_p64 (__d1, __d0)); +#else + return vreinterpretq_p128_p64 (vld1q_p64 ((poly64_t*) __ptr)); +#endif +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vstrq_p128 (poly128_t * __ptr, poly128_t __val) +{ +#ifdef __ARM_BIG_ENDIAN + poly64x2_t __tmp = vreinterpretq_p64_p128 (__val); + poly64_t __d0 = vget_high_p64 (__tmp); + poly64_t __d1 = vget_low_p64 (__tmp); + vst1q_p64 ((poly64_t*) __ptr, vcombine_p64 (__d0, __d1)); +#else + vst1q_p64 ((poly64_t*) __ptr, vreinterpretq_p64_p128 (__val)); +#endif +} + +/* The vceq_p64 intrinsic does not map to a single instruction. + Instead we emulate it by performing a 32-bit variant of the vceq + and applying a pairwise min reduction to the result. + vceq_u32 will produce two 32-bit halves, each of which will contain either + all ones or all zeros depending on whether the corresponding 32-bit + halves of the poly64_t were equal. The whole poly64_t values are equal + if and only if both halves are equal, i.e. vceq_u32 returns all ones. + If the result is all zeroes for any half then the whole result is zeroes. + This is what the pairwise min reduction achieves. 
*/ + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_p64 (poly64x1_t __a, poly64x1_t __b) +{ + uint32x2_t __t_a = vreinterpret_u32_p64 (__a); + uint32x2_t __t_b = vreinterpret_u32_p64 (__b); + uint32x2_t __c = vceq_u32 (__t_a, __t_b); + uint32x2_t __m = vpmin_u32 (__c, __c); + return vreinterpret_u64_u32 (__m); +} + +/* The vtst_p64 intrinsic does not map to a single instruction. + We emulate it in a way similar to vceq_p64 above, but here we do + a reduction with max since if any two corresponding bits + in the two poly64_t values match, then the whole result must be all ones. */ + +__extension__ extern __inline uint64x1_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtst_p64 (poly64x1_t __a, poly64x1_t __b) +{ + uint32x2_t __t_a = vreinterpret_u32_p64 (__a); + uint32x2_t __t_b = vreinterpret_u32_p64 (__b); + uint32x2_t __c = vtst_u32 (__t_a, __t_b); + uint32x2_t __m = vpmax_u32 (__c, __c); + return vreinterpret_u64_u32 (__m); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaeseq_u8 (uint8x16_t __data, uint8x16_t __key) +{ + return __builtin_arm_crypto_aese (__data, __key); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaesdq_u8 (uint8x16_t __data, uint8x16_t __key) +{ + return __builtin_arm_crypto_aesd (__data, __key); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaesmcq_u8 (uint8x16_t __data) +{ + return __builtin_arm_crypto_aesmc (__data); +} + +__extension__ extern __inline uint8x16_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaesimcq_u8 (uint8x16_t __data) +{ + return __builtin_arm_crypto_aesimc (__data); +} + +__extension__ extern __inline uint32_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha1h_u32 (uint32_t __hash_e) +{ + uint32x4_t __t = vdupq_n_u32 (0); + __t = vsetq_lane_u32 (__hash_e, __t, 0); + __t = __builtin_arm_crypto_sha1h (__t); + return vgetq_lane_u32 (__t, 0); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk) +{ + uint32x4_t __t = vdupq_n_u32 (0); + __t = vsetq_lane_u32 (__hash_e, __t, 0); + return __builtin_arm_crypto_sha1c (__hash_abcd, __t, __wk); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk) +{ + uint32x4_t __t = vdupq_n_u32 (0); + __t = vsetq_lane_u32 (__hash_e, __t, 0); + return __builtin_arm_crypto_sha1p (__hash_abcd, __t, __wk); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk) +{ + uint32x4_t __t = vdupq_n_u32 (0); + __t = vsetq_lane_u32 (__hash_e, __t, 0); + return __builtin_arm_crypto_sha1m (__hash_abcd, __t, __wk); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11) +{ + return __builtin_arm_crypto_sha1su0 (__w0_3, __w4_7, __w8_11); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15) +{ + return __builtin_arm_crypto_sha1su1 (__tw0_3, __w12_15); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk) +{ + return __builtin_arm_crypto_sha256h (__hash_abcd, __hash_efgh, __wk); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha256h2q_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk) +{ + return __builtin_arm_crypto_sha256h2 (__hash_abcd, __hash_efgh, __wk); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7) +{ + return __builtin_arm_crypto_sha256su0 (__w0_3, __w4_7); +} + +__extension__ extern __inline uint32x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15) +{ + return __builtin_arm_crypto_sha256su1 (__tw0_3, __w8_11, __w12_15); +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_p64 (poly64_t __a, poly64_t __b) +{ + return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __a, (uint64_t) __b); +} + +__extension__ extern __inline poly128_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmull_high_p64 (poly64x2_t __a, poly64x2_t __b) +{ + poly64_t __t1 = vget_high_p64 (__a); + poly64_t __t2 = vget_high_p64 (__b); + + return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __t1, (uint64_t) __t2); +} + +#pragma GCC pop_options + + /* Intrinsics for FP16 instructions. 
*/ +#pragma GCC push_options +#pragma GCC target ("fpu=neon-fp-armv8") +#if defined (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabd_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vabdv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabdq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vabdv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabs_f16 (float16x4_t __a) +{ + return __builtin_neon_vabsv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vabsq_f16 (float16x8_t __a) +{ + return __builtin_neon_vabsv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vadd_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vaddv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vaddq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vaddv8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcage_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcagev4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcageq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcagev8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcagt_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcagtv4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcagtq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcagtv8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcale_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcalev4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcaleq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcalev8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcalt_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcaltv4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcaltq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcaltv8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceq_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vceqv4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqq_f16 (float16x8_t __a, float16x8_t __b) +{ + return 
(uint16x8_t)__builtin_neon_vceqv8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqz_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vceqzv4hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vceqzq_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vceqzv8hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcge_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgev4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgeq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgev8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgez_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcgezv4hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgezq_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcgezv8hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgt_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcgtv4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcgtv8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtz_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcgtzv4hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcgtzq_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcgtzv8hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcle_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vclev4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcleq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vclev8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclez_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vclezv4hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclezq_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vclezv8hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vclt_f16 (float16x4_t __a, float16x4_t __b) +{ + return (uint16x4_t)__builtin_neon_vcltv4hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltq_f16 (float16x8_t __a, float16x8_t __b) +{ + return (uint16x8_t)__builtin_neon_vcltv8hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) 
+vcltz_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcltzv4hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcltzq_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcltzv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_f16_s16 (int16x4_t __a) +{ + return (float16x4_t)__builtin_neon_vcvtsv4hi (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_f16_u16 (uint16x4_t __a) +{ + return (float16x4_t)__builtin_neon_vcvtuv4hi ((int16x4_t)__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_s16_f16 (float16x4_t __a) +{ + return (int16x4_t)__builtin_neon_vcvtsv4hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_u16_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcvtuv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_f16_s16 (int16x8_t __a) +{ + return (float16x8_t)__builtin_neon_vcvtsv8hi (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_f16_u16 (uint16x8_t __a) +{ + return (float16x8_t)__builtin_neon_vcvtuv8hi ((int16x8_t)__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_s16_f16 (float16x8_t __a) +{ + return (int16x8_t)__builtin_neon_vcvtsv8hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_u16_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcvtuv8hf (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvta_s16_f16 (float16x4_t __a) +{ + return __builtin_neon_vcvtasv4hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvta_u16_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcvtauv4hf (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtaq_s16_f16 (float16x8_t __a) +{ + return __builtin_neon_vcvtasv8hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtaq_u16_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcvtauv8hf (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtm_s16_f16 (float16x4_t __a) +{ + return __builtin_neon_vcvtmsv4hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtm_u16_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcvtmuv4hf (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtmq_s16_f16 (float16x8_t __a) +{ + return __builtin_neon_vcvtmsv8hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtmq_u16_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcvtmuv8hf (__a); +} + 
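/* A minimal usage sketch, not part of the header itself: the conversion
   intrinsics above differ only in rounding.  vcvt_s16_f16 truncates toward
   zero, vcvta_s16_f16 rounds to nearest with ties away from zero, and
   vcvtm_s16_f16 rounds toward minus infinity.  Assumes a build with
   -mfpu=neon-fp-armv8 so that __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is
   defined.  */
#include <arm_neon.h>
#include <stdio.h>

void fp16_rounding_demo (void)
{
  float16x4_t v = vdup_n_f16 (2.5);              /* { 2.5, 2.5, 2.5, 2.5 } */
  printf ("trunc=%d away=%d floor=%d\n",
          vget_lane_s16 (vcvt_s16_f16 (v), 0),   /* 2 */
          vget_lane_s16 (vcvta_s16_f16 (v), 0),  /* 3 */
          vget_lane_s16 (vcvtm_s16_f16 (v), 0)); /* 2 */
}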
+__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtn_s16_f16 (float16x4_t __a) +{ + return __builtin_neon_vcvtnsv4hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtn_u16_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcvtnuv4hf (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtnq_s16_f16 (float16x8_t __a) +{ + return __builtin_neon_vcvtnsv8hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtnq_u16_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcvtnuv8hf (__a); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtp_s16_f16 (float16x4_t __a) +{ + return __builtin_neon_vcvtpsv4hf (__a); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtp_u16_f16 (float16x4_t __a) +{ + return (uint16x4_t)__builtin_neon_vcvtpuv4hf (__a); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtpq_s16_f16 (float16x8_t __a) +{ + return __builtin_neon_vcvtpsv8hf (__a); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtpq_u16_f16 (float16x8_t __a) +{ + return (uint16x8_t)__builtin_neon_vcvtpuv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_f16_s16 (int16x4_t __a, const int __b) +{ + return __builtin_neon_vcvts_nv4hi (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_f16_u16 (uint16x4_t __a, const int __b) +{ + return __builtin_neon_vcvtu_nv4hi ((int16x4_t)__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_f16_s16 (int16x8_t __a, const int __b) +{ + return __builtin_neon_vcvts_nv8hi (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_f16_u16 (uint16x8_t __a, const int __b) +{ + return __builtin_neon_vcvtu_nv8hi ((int16x8_t)__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_s16_f16 (float16x4_t __a, const int __b) +{ + return __builtin_neon_vcvts_nv4hf (__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvt_n_u16_f16 (float16x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_neon_vcvtu_nv4hf (__a, __b); +} + +__extension__ extern __inline int16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_s16_f16 (float16x8_t __a, const int __b) +{ + return __builtin_neon_vcvts_nv8hf (__a, __b); +} + +__extension__ extern __inline uint16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vcvtq_n_u16_f16 (float16x8_t __a, const int __b) +{ + return (uint16x8_t)__builtin_neon_vcvtu_nv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vfma_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c) +{ + 
return __builtin_neon_vfmav4hf (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __builtin_neon_vfmav8hf (__a, __b, __c); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vfms_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c) +{ + return __builtin_neon_vfmsv4hf (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __builtin_neon_vfmsv8hf (__a, __b, __c); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmax_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vmaxfv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vmaxfv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxnm_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vmaxnmv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmaxnmq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vmaxnmv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmin_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vminfv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vminfv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminnm_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vminnmv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vminnmq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vminnmv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vmulfv4hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_lane_f16 (float16x4_t __a, float16x4_t __b, const int __c) +{ + return __builtin_neon_vmul_lanev4hf (__a, __b, __c); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmul_n_f16 (float16x4_t __a, float16_t __b) +{ + return __builtin_neon_vmul_nv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vmulfv8hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __c) +{ + return __builtin_neon_vmul_lanev8hf (__a, 
__b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmulq_n_f16 (float16x8_t __a, float16_t __b) +{ + return __builtin_neon_vmul_nv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vneg_f16 (float16x4_t __a) +{ + return __builtin_neon_vnegv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vnegq_f16 (float16x8_t __a) +{ + return __builtin_neon_vnegv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpadd_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vpaddv4hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmax_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vpmaxfv4hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vpmin_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vpminfv4hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpe_f16 (float16x4_t __a) +{ + return __builtin_neon_vrecpev4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpeq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrecpev8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrnd_f16 (float16x4_t __a) +{ + return __builtin_neon_vrndv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrndv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrnda_f16 (float16x4_t __a) +{ + return __builtin_neon_vrndav4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndaq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrndav8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndm_f16 (float16x4_t __a) +{ + return __builtin_neon_vrndmv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndmq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrndmv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndn_f16 (float16x4_t __a) +{ + return __builtin_neon_vrndnv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndnq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrndnv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndp_f16 (float16x4_t __a) +{ + return __builtin_neon_vrndpv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndpq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrndpv8hf (__a); +} + +__extension__ extern 
__inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndx_f16 (float16x4_t __a) +{ + return __builtin_neon_vrndxv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrndxq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrndxv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrte_f16 (float16x4_t __a) +{ + return __builtin_neon_vrsqrtev4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrteq_f16 (float16x8_t __a) +{ + return __builtin_neon_vrsqrtev8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecps_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vrecpsv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrecpsq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vrecpsv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrts_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vrsqrtsv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrsqrtsq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vrsqrtsv8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsub_f16 (float16x4_t __a, float16x4_t __b) +{ + return __builtin_neon_vsubv4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vsubq_f16 (float16x8_t __a, float16x8_t __b) +{ + return __builtin_neon_vsubv8hf (__a, __b); +} + +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC. */ +#pragma GCC pop_options + +
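/* A minimal usage sketch, not part of the header itself: combining the
   fused multiply-add and IEEE 754 minNum/maxNum intrinsics defined in the
   section above.  Assumes a build with -mfpu=neon-fp-armv8 so that
   __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is defined.  */
#include <arm_neon.h>

float16x4_t fma_then_clamp (float16x4_t acc, float16x4_t x, float16x4_t y,
                            float16x4_t lo, float16x4_t hi)
{
  float16x4_t r = vfma_f16 (acc, x, y);  /* acc + x * y, single rounding */
  r = vmaxnm_f16 (r, lo);                /* clamp from below (NaN-aware) */
  return vminnm_f16 (r, hi);             /* clamp from above             */
}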
/* Half-precision data processing intrinsics. */ +#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE) + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbsl_f16 (uint16x4_t __a, float16x4_t __b, float16x4_t __c) +{ + return __builtin_neon_vbslv4hf ((int16x4_t)__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vbslq_f16 (uint16x8_t __a, float16x8_t __b, float16x8_t __c) +{ + return __builtin_neon_vbslv8hf ((int16x8_t)__a, __b, __c); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_n_f16 (float16_t __a) +{ + return __builtin_neon_vdup_nv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_n_f16 (float16_t __a) +{ + return __builtin_neon_vdup_nv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdup_lane_f16 (float16x4_t __a, const int __b) +{ + return __builtin_neon_vdup_lanev4hf (__a, __b); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vdupq_lane_f16 (float16x4_t __a, const int __b) +{ + return __builtin_neon_vdup_lanev8hf (__a, __b); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vext_f16 (float16x4_t __a, float16x4_t __b, const int __c) +{ + return __builtin_neon_vextv4hf (__a, __b, __c); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vextq_f16 (float16x8_t __a, float16x8_t __b, const int __c) +{ + return __builtin_neon_vextv8hf (__a, __b, __c); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmov_n_f16 (float16_t __a) +{ + return __builtin_neon_vdup_nv4hf (__a); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vmovq_n_f16 (float16_t __a) +{ + return __builtin_neon_vdup_nv8hf (__a); +} + +__extension__ extern __inline float16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrev64_f16 (float16x4_t __a) +{ + return (float16x4_t)__builtin_shuffle (__a, (uint16x4_t){ 3, 2, 1, 0 }); +} + +__extension__ extern __inline float16x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vrev64q_f16 (float16x8_t __a) +{ + return + (float16x8_t)__builtin_shuffle (__a, + (uint16x8_t){ 3, 2, 1, 0, 7, 6, 5, 4 }); +} + +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtrn_f16 (float16x4_t __a, float16x4_t __b) +{ + float16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 5, 1, 7, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 4, 0, 6, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 0, 4, 2, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 1, 5, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vtrnq_f16 (float16x8_t __a, float16x8_t __b) +{ + float16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, + (uint16x8_t){ 9, 1, 11, 3, 13, 5, 15, 7 }); + __rv.val[1] =
__builtin_shuffle (__a, __b, + (uint16x8_t){ 8, 0, 10, 2, 12, 4, 14, 6 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, + (uint16x8_t){ 0, 8, 2, 10, 4, 12, 6, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, + (uint16x8_t){ 1, 9, 3, 11, 5, 13, 7, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzp_f16 (float16x4_t __a, float16x4_t __b) +{ + float16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 5, 7, 1, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 4, 6, 0, 2 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 0, 2, 4, 6 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 1, 3, 5, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vuzpq_f16 (float16x8_t __a, float16x8_t __b) +{ + float16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 5, 7, 1, 3, 13, 15, 9, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 4, 6, 0, 2, 12, 14, 8, 10 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, + (uint16x8_t){ 0, 2, 4, 6, 8, 10, 12, 14 }); + __rv.val[1] = __builtin_shuffle (__a, __b, + (uint16x8_t){ 1, 3, 5, 7, 9, 11, 13, 15 }); +#endif + return __rv; +} + +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzip_f16 (float16x4_t __a, float16x4_t __b) +{ + float16x4x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 6, 2, 7, 3 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 4, 0, 5, 1 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t){ 0, 4, 1, 5 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t){ 2, 6, 3, 7 }); +#endif + return __rv; +} + +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vzipq_f16 (float16x8_t __a, float16x8_t __b) +{ + float16x8x2_t __rv; +#ifdef __ARM_BIG_ENDIAN + __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 10, 2, 11, 3, 8, 0, 9, 1 }); + __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t) + { 14, 6, 15, 7, 12, 4, 13, 5 }); +#else + __rv.val[0] = __builtin_shuffle (__a, __b, + (uint16x8_t){ 0, 8, 1, 9, 2, 10, 3, 11 }); + __rv.val[1] = __builtin_shuffle (__a, __b, + (uint16x8_t){ 4, 12, 5, 13, 6, 14, 7, 15 }); +#endif + return __rv; +} + +#endif + +#ifdef __cplusplus +} +#endif + +#pragma GCC pop_options + +#endif +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/float.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/float.h new file mode 100644 index 0000000..658017f --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/float.h @@ -0,0 +1,506 @@ +/* Copyright (C) 2002-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* + * ISO C Standard: 5.2.4.2.2 Characteristics of floating types + */ + +#ifndef _FLOAT_H___ +#define _FLOAT_H___ + +/* Radix of exponent representation, b. */ +#undef FLT_RADIX +#define FLT_RADIX __FLT_RADIX__ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef FLT_MANT_DIG +#undef DBL_MANT_DIG +#undef LDBL_MANT_DIG +#define FLT_MANT_DIG __FLT_MANT_DIG__ +#define DBL_MANT_DIG __DBL_MANT_DIG__ +#define LDBL_MANT_DIG __LDBL_MANT_DIG__ + +/* Number of decimal digits, q, such that any floating-point number with q + decimal digits can be rounded into a floating-point number with p radix b + digits and back again without change to the q decimal digits, + + p * log10(b) if b is a power of 10 + floor((p - 1) * log10(b)) otherwise +*/ +#undef FLT_DIG +#undef DBL_DIG +#undef LDBL_DIG +#define FLT_DIG __FLT_DIG__ +#define DBL_DIG __DBL_DIG__ +#define LDBL_DIG __LDBL_DIG__ + +/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */ +#undef FLT_MIN_EXP +#undef DBL_MIN_EXP +#undef LDBL_MIN_EXP +#define FLT_MIN_EXP __FLT_MIN_EXP__ +#define DBL_MIN_EXP __DBL_MIN_EXP__ +#define LDBL_MIN_EXP __LDBL_MIN_EXP__ + +/* Minimum negative integer such that 10 raised to that power is in the + range of normalized floating-point numbers, + + ceil(log10(b) * (emin - 1)) +*/ +#undef FLT_MIN_10_EXP +#undef DBL_MIN_10_EXP +#undef LDBL_MIN_10_EXP +#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__ +#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__ +#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__ + +/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. */ +#undef FLT_MAX_EXP +#undef DBL_MAX_EXP +#undef LDBL_MAX_EXP +#define FLT_MAX_EXP __FLT_MAX_EXP__ +#define DBL_MAX_EXP __DBL_MAX_EXP__ +#define LDBL_MAX_EXP __LDBL_MAX_EXP__ + +/* Maximum integer such that 10 raised to that power is in the range of + representable finite floating-point numbers, + + floor(log10((1 - b**-p) * b**emax)) +*/ +#undef FLT_MAX_10_EXP +#undef DBL_MAX_10_EXP +#undef LDBL_MAX_10_EXP +#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__ +#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__ +#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__ + +/* Maximum representable finite floating-point number, + + (1 - b**-p) * b**emax +*/ +#undef FLT_MAX +#undef DBL_MAX +#undef LDBL_MAX +#define FLT_MAX __FLT_MAX__ +#define DBL_MAX __DBL_MAX__ +#define LDBL_MAX __LDBL_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type, b**1-p. */ +#undef FLT_EPSILON +#undef DBL_EPSILON +#undef LDBL_EPSILON +#define FLT_EPSILON __FLT_EPSILON__ +#define DBL_EPSILON __DBL_EPSILON__ +#define LDBL_EPSILON __LDBL_EPSILON__ + +/* Minimum normalized positive floating-point number, b**(emin - 1). */ +#undef FLT_MIN +#undef DBL_MIN +#undef LDBL_MIN +#define FLT_MIN __FLT_MIN__ +#define DBL_MIN __DBL_MIN__ +#define LDBL_MIN __LDBL_MIN__ + +/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */ +/* ??? This is supposed to change with calls to fesetround in <fenv.h>.
*/ +#undef FLT_ROUNDS +#define FLT_ROUNDS 1 + +#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) \ + || (defined (__cplusplus) && __cplusplus >= 201103L) +/* The floating-point expression evaluation method. The precise + definitions of these values are generalised to include support for + the interchange and extended types defined in ISO/IEC TS 18661-3. + Prior to this (for C99/C11) the definitions were: + + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type float and double + to the range and precision of the double type, evaluate + long double operations and constants to the range and + precision of the long double type + 2 evaluate all operations and constants to the range and + precision of the long double type + + The TS 18661-3 definitions are: + + -1 indeterminate + 0 evaluate all operations and constants, whose semantic type has + at most the range and precision of float, to the range and + precision of float; evaluate all other operations and constants + to the range and precision of the semantic type. + 1 evaluate all operations and constants, whose semantic type has + at most the range and precision of double, to the range and + precision of double; evaluate all other operations and constants + to the range and precision of the semantic type. + 2 evaluate all operations and constants, whose semantic type has + at most the range and precision of long double, to the range and + precision of long double; evaluate all other operations and + constants to the range and precision of the semantic type. + N where _FloatN is a supported interchange floating type + evaluate all operations and constants, whose semantic type has + at most the range and precision of the _FloatN type, to the + range and precision of the _FloatN type; evaluate all other + operations and constants to the range and precision of the + semantic type. + N + 1, where _FloatNx is a supported extended floating type + evaluate operations and constants, whose semantic type has at + most the range and precision of the _FloatNx type, to the range + and precision of the _FloatNx type; evaluate all other + operations and constants to the range and precision of the + semantic type. + + The compiler predefines two macros: + + __FLT_EVAL_METHOD__ + Which, depending on the value given for + -fpermitted-flt-eval-methods, may be limited to only those values + for FLT_EVAL_METHOD defined in C99/C11. + + __FLT_EVAL_METHOD_TS_18661_3__ + Which always permits the values for FLT_EVAL_METHOD defined in + ISO/IEC TS 18661-3. + + Here we want to use __FLT_EVAL_METHOD__, unless + __STDC_WANT_IEC_60559_TYPES_EXT__ is defined, in which case the user + is specifically asking for the ISO/IEC TS 18661-3 types, so we use + __FLT_EVAL_METHOD_TS_18661_3__. + + ??? This ought to change with the setting of the fp control word; + the value provided by the compiler assumes the widest setting. 
*/ +#undef FLT_EVAL_METHOD +#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__ +#define FLT_EVAL_METHOD __FLT_EVAL_METHOD_TS_18661_3__ +#else +#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__ +#endif + +/* Number of decimal digits, n, such that any floating-point number in the + widest supported floating type with pmax radix b digits can be rounded + to a floating-point number with n decimal digits and back again without + change to the value, + + pmax * log10(b) if b is a power of 10 + ceil(1 + pmax * log10(b)) otherwise +*/ +#undef DECIMAL_DIG +#define DECIMAL_DIG __DECIMAL_DIG__ + +#endif /* C99 */ + +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L +/* Versions of DECIMAL_DIG for each floating-point type. */ +#undef FLT_DECIMAL_DIG +#undef DBL_DECIMAL_DIG +#undef LDBL_DECIMAL_DIG +#define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__ +#define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__ +#define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__ + +/* Whether types support subnormal numbers. */ +#undef FLT_HAS_SUBNORM +#undef DBL_HAS_SUBNORM +#undef LDBL_HAS_SUBNORM +#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__ +#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__ +#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__ + +/* Minimum positive values, including subnormals. */ +#undef FLT_TRUE_MIN +#undef DBL_TRUE_MIN +#undef LDBL_TRUE_MIN +#define FLT_TRUE_MIN __FLT_DENORM_MIN__ +#define DBL_TRUE_MIN __DBL_DENORM_MIN__ +#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__ + +#endif /* C11 */ + +#ifdef __STDC_WANT_IEC_60559_BFP_EXT__ +/* Number of decimal digits for which conversions between decimal + character strings and binary formats, in both directions, are + correctly rounded. */ +#define CR_DECIMAL_DIG __UINTMAX_MAX__ +#endif + +#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__ +/* Constants for _FloatN and _FloatNx types from TS 18661-3. See + comments above for their semantics. */ + +#ifdef __FLT16_MANT_DIG__ +#undef FLT16_MANT_DIG +#define FLT16_MANT_DIG __FLT16_MANT_DIG__ +#undef FLT16_DIG +#define FLT16_DIG __FLT16_DIG__ +#undef FLT16_MIN_EXP +#define FLT16_MIN_EXP __FLT16_MIN_EXP__ +#undef FLT16_MIN_10_EXP +#define FLT16_MIN_10_EXP __FLT16_MIN_10_EXP__ +#undef FLT16_MAX_EXP +#define FLT16_MAX_EXP __FLT16_MAX_EXP__ +#undef FLT16_MAX_10_EXP +#define FLT16_MAX_10_EXP __FLT16_MAX_10_EXP__ +#undef FLT16_MAX +#define FLT16_MAX __FLT16_MAX__ +#undef FLT16_EPSILON +#define FLT16_EPSILON __FLT16_EPSILON__ +#undef FLT16_MIN +#define FLT16_MIN __FLT16_MIN__ +#undef FLT16_DECIMAL_DIG +#define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__ +#undef FLT16_TRUE_MIN +#define FLT16_TRUE_MIN __FLT16_DENORM_MIN__ +#endif /* __FLT16_MANT_DIG__. */ + +#ifdef __FLT32_MANT_DIG__ +#undef FLT32_MANT_DIG +#define FLT32_MANT_DIG __FLT32_MANT_DIG__ +#undef FLT32_DIG +#define FLT32_DIG __FLT32_DIG__ +#undef FLT32_MIN_EXP +#define FLT32_MIN_EXP __FLT32_MIN_EXP__ +#undef FLT32_MIN_10_EXP +#define FLT32_MIN_10_EXP __FLT32_MIN_10_EXP__ +#undef FLT32_MAX_EXP +#define FLT32_MAX_EXP __FLT32_MAX_EXP__ +#undef FLT32_MAX_10_EXP +#define FLT32_MAX_10_EXP __FLT32_MAX_10_EXP__ +#undef FLT32_MAX +#define FLT32_MAX __FLT32_MAX__ +#undef FLT32_EPSILON +#define FLT32_EPSILON __FLT32_EPSILON__ +#undef FLT32_MIN +#define FLT32_MIN __FLT32_MIN__ +#undef FLT32_DECIMAL_DIG +#define FLT32_DECIMAL_DIG __FLT32_DECIMAL_DIG__ +#undef FLT32_TRUE_MIN +#define FLT32_TRUE_MIN __FLT32_DENORM_MIN__ +#endif /* __FLT32_MANT_DIG__. 
*/ + +#ifdef __FLT64_MANT_DIG__ +#undef FLT64_MANT_DIG +#define FLT64_MANT_DIG __FLT64_MANT_DIG__ +#undef FLT64_DIG +#define FLT64_DIG __FLT64_DIG__ +#undef FLT64_MIN_EXP +#define FLT64_MIN_EXP __FLT64_MIN_EXP__ +#undef FLT64_MIN_10_EXP +#define FLT64_MIN_10_EXP __FLT64_MIN_10_EXP__ +#undef FLT64_MAX_EXP +#define FLT64_MAX_EXP __FLT64_MAX_EXP__ +#undef FLT64_MAX_10_EXP +#define FLT64_MAX_10_EXP __FLT64_MAX_10_EXP__ +#undef FLT64_MAX +#define FLT64_MAX __FLT64_MAX__ +#undef FLT64_EPSILON +#define FLT64_EPSILON __FLT64_EPSILON__ +#undef FLT64_MIN +#define FLT64_MIN __FLT64_MIN__ +#undef FLT64_DECIMAL_DIG +#define FLT64_DECIMAL_DIG __FLT64_DECIMAL_DIG__ +#undef FLT64_TRUE_MIN +#define FLT64_TRUE_MIN __FLT64_DENORM_MIN__ +#endif /* __FLT64_MANT_DIG__. */ + +#ifdef __FLT128_MANT_DIG__ +#undef FLT128_MANT_DIG +#define FLT128_MANT_DIG __FLT128_MANT_DIG__ +#undef FLT128_DIG +#define FLT128_DIG __FLT128_DIG__ +#undef FLT128_MIN_EXP +#define FLT128_MIN_EXP __FLT128_MIN_EXP__ +#undef FLT128_MIN_10_EXP +#define FLT128_MIN_10_EXP __FLT128_MIN_10_EXP__ +#undef FLT128_MAX_EXP +#define FLT128_MAX_EXP __FLT128_MAX_EXP__ +#undef FLT128_MAX_10_EXP +#define FLT128_MAX_10_EXP __FLT128_MAX_10_EXP__ +#undef FLT128_MAX +#define FLT128_MAX __FLT128_MAX__ +#undef FLT128_EPSILON +#define FLT128_EPSILON __FLT128_EPSILON__ +#undef FLT128_MIN +#define FLT128_MIN __FLT128_MIN__ +#undef FLT128_DECIMAL_DIG +#define FLT128_DECIMAL_DIG __FLT128_DECIMAL_DIG__ +#undef FLT128_TRUE_MIN +#define FLT128_TRUE_MIN __FLT128_DENORM_MIN__ +#endif /* __FLT128_MANT_DIG__. */ + +#ifdef __FLT32X_MANT_DIG__ +#undef FLT32X_MANT_DIG +#define FLT32X_MANT_DIG __FLT32X_MANT_DIG__ +#undef FLT32X_DIG +#define FLT32X_DIG __FLT32X_DIG__ +#undef FLT32X_MIN_EXP +#define FLT32X_MIN_EXP __FLT32X_MIN_EXP__ +#undef FLT32X_MIN_10_EXP +#define FLT32X_MIN_10_EXP __FLT32X_MIN_10_EXP__ +#undef FLT32X_MAX_EXP +#define FLT32X_MAX_EXP __FLT32X_MAX_EXP__ +#undef FLT32X_MAX_10_EXP +#define FLT32X_MAX_10_EXP __FLT32X_MAX_10_EXP__ +#undef FLT32X_MAX +#define FLT32X_MAX __FLT32X_MAX__ +#undef FLT32X_EPSILON +#define FLT32X_EPSILON __FLT32X_EPSILON__ +#undef FLT32X_MIN +#define FLT32X_MIN __FLT32X_MIN__ +#undef FLT32X_DECIMAL_DIG +#define FLT32X_DECIMAL_DIG __FLT32X_DECIMAL_DIG__ +#undef FLT32X_TRUE_MIN +#define FLT32X_TRUE_MIN __FLT32X_DENORM_MIN__ +#endif /* __FLT32X_MANT_DIG__. */ + +#ifdef __FLT64X_MANT_DIG__ +#undef FLT64X_MANT_DIG +#define FLT64X_MANT_DIG __FLT64X_MANT_DIG__ +#undef FLT64X_DIG +#define FLT64X_DIG __FLT64X_DIG__ +#undef FLT64X_MIN_EXP +#define FLT64X_MIN_EXP __FLT64X_MIN_EXP__ +#undef FLT64X_MIN_10_EXP +#define FLT64X_MIN_10_EXP __FLT64X_MIN_10_EXP__ +#undef FLT64X_MAX_EXP +#define FLT64X_MAX_EXP __FLT64X_MAX_EXP__ +#undef FLT64X_MAX_10_EXP +#define FLT64X_MAX_10_EXP __FLT64X_MAX_10_EXP__ +#undef FLT64X_MAX +#define FLT64X_MAX __FLT64X_MAX__ +#undef FLT64X_EPSILON +#define FLT64X_EPSILON __FLT64X_EPSILON__ +#undef FLT64X_MIN +#define FLT64X_MIN __FLT64X_MIN__ +#undef FLT64X_DECIMAL_DIG +#define FLT64X_DECIMAL_DIG __FLT64X_DECIMAL_DIG__ +#undef FLT64X_TRUE_MIN +#define FLT64X_TRUE_MIN __FLT64X_DENORM_MIN__ +#endif /* __FLT64X_MANT_DIG__. 
*/ + +#ifdef __FLT128X_MANT_DIG__ +#undef FLT128X_MANT_DIG +#define FLT128X_MANT_DIG __FLT128X_MANT_DIG__ +#undef FLT128X_DIG +#define FLT128X_DIG __FLT128X_DIG__ +#undef FLT128X_MIN_EXP +#define FLT128X_MIN_EXP __FLT128X_MIN_EXP__ +#undef FLT128X_MIN_10_EXP +#define FLT128X_MIN_10_EXP __FLT128X_MIN_10_EXP__ +#undef FLT128X_MAX_EXP +#define FLT128X_MAX_EXP __FLT128X_MAX_EXP__ +#undef FLT128X_MAX_10_EXP +#define FLT128X_MAX_10_EXP __FLT128X_MAX_10_EXP__ +#undef FLT128X_MAX +#define FLT128X_MAX __FLT128X_MAX__ +#undef FLT128X_EPSILON +#define FLT128X_EPSILON __FLT128X_EPSILON__ +#undef FLT128X_MIN +#define FLT128X_MIN __FLT128X_MIN__ +#undef FLT128X_DECIMAL_DIG +#define FLT128X_DECIMAL_DIG __FLT128X_DECIMAL_DIG__ +#undef FLT128X_TRUE_MIN +#define FLT128X_TRUE_MIN __FLT128X_DENORM_MIN__ +#endif /* __FLT128X_MANT_DIG__. */ + +#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__. */ + +#ifdef __STDC_WANT_DEC_FP__ +/* Draft Technical Report 24732, extension for decimal floating-point + arithmetic: Characteristic of decimal floating types <float.h>. */ + +/* Number of base-FLT_RADIX digits in the significand, p. */ +#undef DEC32_MANT_DIG +#undef DEC64_MANT_DIG +#undef DEC128_MANT_DIG +#define DEC32_MANT_DIG __DEC32_MANT_DIG__ +#define DEC64_MANT_DIG __DEC64_MANT_DIG__ +#define DEC128_MANT_DIG __DEC128_MANT_DIG__ + +/* Minimum exponent. */ +#undef DEC32_MIN_EXP +#undef DEC64_MIN_EXP +#undef DEC128_MIN_EXP +#define DEC32_MIN_EXP __DEC32_MIN_EXP__ +#define DEC64_MIN_EXP __DEC64_MIN_EXP__ +#define DEC128_MIN_EXP __DEC128_MIN_EXP__ + +/* Maximum exponent. */ +#undef DEC32_MAX_EXP +#undef DEC64_MAX_EXP +#undef DEC128_MAX_EXP +#define DEC32_MAX_EXP __DEC32_MAX_EXP__ +#define DEC64_MAX_EXP __DEC64_MAX_EXP__ +#define DEC128_MAX_EXP __DEC128_MAX_EXP__ + +/* Maximum representable finite decimal floating-point number + (there are 6, 15, and 33 9s after the decimal points respectively). */ +#undef DEC32_MAX +#undef DEC64_MAX +#undef DEC128_MAX +#define DEC32_MAX __DEC32_MAX__ +#define DEC64_MAX __DEC64_MAX__ +#define DEC128_MAX __DEC128_MAX__ + +/* The difference between 1 and the least value greater than 1 that is + representable in the given floating point type. */ +#undef DEC32_EPSILON +#undef DEC64_EPSILON +#undef DEC128_EPSILON +#define DEC32_EPSILON __DEC32_EPSILON__ +#define DEC64_EPSILON __DEC64_EPSILON__ +#define DEC128_EPSILON __DEC128_EPSILON__ + +/* Minimum normalized positive floating-point number. */ +#undef DEC32_MIN +#undef DEC64_MIN +#undef DEC128_MIN +#define DEC32_MIN __DEC32_MIN__ +#define DEC64_MIN __DEC64_MIN__ +#define DEC128_MIN __DEC128_MIN__ + +/* Minimum subnormal positive floating-point number. */ +#undef DEC32_SUBNORMAL_MIN +#undef DEC64_SUBNORMAL_MIN +#undef DEC128_SUBNORMAL_MIN +#define DEC32_SUBNORMAL_MIN __DEC32_SUBNORMAL_MIN__ +#define DEC64_SUBNORMAL_MIN __DEC64_SUBNORMAL_MIN__ +#define DEC128_SUBNORMAL_MIN __DEC128_SUBNORMAL_MIN__ +
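/* A minimal usage sketch, not part of the header itself: the DEC32/64/128
   macros above are exposed only when __STDC_WANT_DEC_FP__ is defined
   before <float.h> is included, and they are meaningful only on targets
   whose compiler provides decimal floating types (this ARM toolchain does
   not), so the probe below keys off the compiler-defined
   __DEC32_MANT_DIG__.  */
#define __STDC_WANT_DEC_FP__
#include <float.h>
#include <stdio.h>

void dec_fp_probe (void)
{
#ifdef __DEC32_MANT_DIG__
  printf ("DEC32_MANT_DIG=%d DEC32_MAX_EXP=%d\n",
          DEC32_MANT_DIG, DEC32_MAX_EXP);
#else
  puts ("decimal floating types not available on this target");
#endif
}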
/* The floating-point expression evaluation method. + -1 indeterminate + 0 evaluate all operations and constants just to the range and + precision of the type + 1 evaluate operations and constants of type _Decimal32 + and _Decimal64 to the range and precision of the _Decimal64 + type, evaluate _Decimal128 operations and constants to the + range and precision of the _Decimal128 type; + 2 evaluate all operations and constants to the range and + precision of the _Decimal128 type. */ + +#undef DEC_EVAL_METHOD +#define DEC_EVAL_METHOD __DEC_EVAL_METHOD__ + +#endif /* __STDC_WANT_DEC_FP__ */ + +#endif /* _FLOAT_H___ */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/gcov.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/gcov.h new file mode 100644 index 0000000..0333ecc --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/gcov.h @@ -0,0 +1,41 @@ +/* GCOV interface routines. + Copyright (C) 2017 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it under + the terms of the GNU General Public License as published by the Free + Software Foundation; either version 3, or (at your option) any later + version. + + GCC is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef GCC_GCOV_H +#define GCC_GCOV_H + +/* Set all counters to zero. */ + +extern void __gcov_reset (void); + +/* Write profile information to a file. */ + +extern void __gcov_dump (void); + +/* Write profile information to a file and reset counters to zero. + The function does operations under a mutex. */ + +extern void __gcov_flush (void); + +#endif /* GCC_GCOV_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/iso646.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/iso646.h new file mode 100644 index 0000000..5a6b503 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/iso646.h @@ -0,0 +1,45 @@ +/* Copyright (C) 1997-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* + * ISO C Standard: 7.9 Alternative spellings + */ + +#ifndef _ISO646_H +#define _ISO646_H + +#ifndef __cplusplus +#define and && +#define and_eq &= +#define bitand & +#define bitor | +#define compl ~ +#define not !
+#define not_eq != +#define or || +#define or_eq |= +#define xor ^ +#define xor_eq ^= +#endif + +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/mmintrin.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/mmintrin.h new file mode 100644 index 0000000..704b54f --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/mmintrin.h @@ -0,0 +1,1836 @@ +/* Copyright (C) 2002-2017 Free Software Foundation, Inc. + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _MMINTRIN_H_INCLUDED +#define _MMINTRIN_H_INCLUDED + +#ifndef __IWMMXT__ +#error mmintrin.h included without enabling WMMX/WMMX2 instructions (e.g. -march=iwmmxt or -march=iwmmxt2) +#endif + + +#if defined __cplusplus +extern "C" { +/* Intrinsics use C name-mangling. */ +#endif /* __cplusplus */ + +/* The data type intended for user use. */ +typedef unsigned long long __m64, __int64; + +/* Internal data types for implementing the intrinsics. */ +typedef int __v2si __attribute__ ((vector_size (8))); +typedef short __v4hi __attribute__ ((vector_size (8))); +typedef signed char __v8qi __attribute__ ((vector_size (8))); + +/* Provided for source compatibility with MMX. */ +extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__)) +_mm_empty (void) +{ +} + +/* "Convert" __m64 and __int64 into each other. */ +static __inline __m64 +_mm_cvtsi64_m64 (__int64 __i) +{ + return __i; +} + +static __inline __int64 +_mm_cvtm64_si64 (__m64 __i) +{ + return __i; +} + +static __inline int +_mm_cvtsi64_si32 (__int64 __i) +{ + return __i; +} + +static __inline __int64 +_mm_cvtsi32_si64 (int __i) +{ + return (__i & 0xffffffff); +} + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with signed saturation. */ +static __inline __m64 +_mm_packs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wpackhss ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Pack the two 32-bit values from M1 into the lower two 16-bit values of + the result, and the two 32-bit values from M2 into the upper two 16-bit + values of the result, all with signed saturation. */ +static __inline __m64 +_mm_packs_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wpackwss ((__v2si)__m1, (__v2si)__m2); +} + +
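/* A minimal usage sketch, not part of the header itself: the pack
   intrinsics narrow each lane with saturation, so out-of-range values
   clamp to the type bounds instead of wrapping.  Assumes a build with
   -march=iwmmxt.  */
#include <mmintrin.h>

/* Narrow eight signed 16-bit lanes (in two __m64 values) to eight signed
   8-bit lanes; lanes outside [-128, 127] saturate to the nearest bound.  */
__m64 narrow_s16_to_s8 (__m64 lo4, __m64 hi4)
{
  return _mm_packs_pi16 (lo4, hi4);
}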
/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and + the 64-bit value from M2 into the upper 32-bits of the result, all with + signed saturation for values that do not fit exactly into 32-bits. */ +static __inline __m64 +_mm_packs_pi64 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wpackdss ((long long)__m1, (long long)__m2); +} + +/* Pack the four 16-bit values from M1 into the lower four 8-bit values of + the result, and the four 16-bit values from M2 into the upper four 8-bit + values of the result, all with unsigned saturation. */ +static __inline __m64 +_mm_packs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wpackhus ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Pack the two 32-bit values from M1 into the lower two 16-bit values of + the result, and the two 32-bit values from M2 into the upper two 16-bit + values of the result, all with unsigned saturation. */ +static __inline __m64 +_mm_packs_pu32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wpackwus ((__v2si)__m1, (__v2si)__m2); +} + +/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and + the 64-bit value from M2 into the upper 32-bits of the result, all with + unsigned saturation for values that do not fit exactly into 32-bits. */ +static __inline __m64 +_mm_packs_pu64 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wpackdus ((long long)__m1, (long long)__m2); +} + +/* Interleave the four 8-bit values from the high half of M1 with the four + 8-bit values from the high half of M2. */ +static __inline __m64 +_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wunpckihb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Interleave the two 16-bit values from the high half of M1 with the two + 16-bit values from the high half of M2. */ +static __inline __m64 +_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wunpckihh ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Interleave the 32-bit value from the high half of M1 with the 32-bit + value from the high half of M2. */ +static __inline __m64 +_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wunpckihw ((__v2si)__m1, (__v2si)__m2); +} + +/* Interleave the four 8-bit values from the low half of M1 with the four + 8-bit values from the low half of M2. */ +static __inline __m64 +_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wunpckilb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Interleave the two 16-bit values from the low half of M1 with the two + 16-bit values from the low half of M2. */ +static __inline __m64 +_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wunpckilh ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Interleave the 32-bit value from the low half of M1 with the 32-bit + value from the low half of M2. */ +static __inline __m64 +_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wunpckilw ((__v2si)__m1, (__v2si)__m2); +} + +/* Take the four 8-bit values from the low half of M1, sign extend them, + and return the result as a vector of four 16-bit quantities. */ +static __inline __m64 +_mm_unpackel_pi8 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckelsb ((__v8qi)__m1); +} + +/* Take the two 16-bit values from the low half of M1, sign extend them, + and return the result as a vector of two 32-bit quantities. */ +static __inline __m64 +_mm_unpackel_pi16 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckelsh ((__v4hi)__m1); +} + +/* Take the 32-bit value from the low half of M1, and return it sign extended + to 64 bits.
*/ +static __inline __m64 +_mm_unpackel_pi32 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckelsw ((__v2si)__m1); +} + +/* Take the four 8-bit values from the high half of M1, sign extend them, + and return the result as a vector of four 16-bit quantities. */ +static __inline __m64 +_mm_unpackeh_pi8 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckehsb ((__v8qi)__m1); +} + +/* Take the two 16-bit values from the high half of M1, sign extend them, + and return the result as a vector of two 32-bit quantities. */ +static __inline __m64 +_mm_unpackeh_pi16 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckehsh ((__v4hi)__m1); +} + +/* Take the 32-bit value from the high half of M1, and return it sign extended + to 64 bits. */ +static __inline __m64 +_mm_unpackeh_pi32 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckehsw ((__v2si)__m1); +} + +/* Take the four 8-bit values from the low half of M1, zero extend them, + and return the result as a vector of four 16-bit quantities. */ +static __inline __m64 +_mm_unpackel_pu8 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckelub ((__v8qi)__m1); +} + +/* Take the two 16-bit values from the low half of M1, zero extend them, + and return the result as a vector of two 32-bit quantities. */ +static __inline __m64 +_mm_unpackel_pu16 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckeluh ((__v4hi)__m1); +} + +/* Take the 32-bit value from the low half of M1, and return it zero extended + to 64 bits. */ +static __inline __m64 +_mm_unpackel_pu32 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckeluw ((__v2si)__m1); +} + +/* Take the four 8-bit values from the high half of M1, zero extend them, + and return the result as a vector of four 16-bit quantities. */ +static __inline __m64 +_mm_unpackeh_pu8 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckehub ((__v8qi)__m1); +} + +/* Take the two 16-bit values from the high half of M1, zero extend them, + and return the result as a vector of two 32-bit quantities. */ +static __inline __m64 +_mm_unpackeh_pu16 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckehuh ((__v4hi)__m1); +} + +/* Take the 32-bit value from the high half of M1, and return it zero extended + to 64 bits. */ +static __inline __m64 +_mm_unpackeh_pu32 (__m64 __m1) +{ + return (__m64) __builtin_arm_wunpckehuw ((__v2si)__m1); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2. */ +static __inline __m64 +_mm_add_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2. */ +static __inline __m64 +_mm_add_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddh ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2. */ +static __inline __m64 +_mm_add_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddw ((__v2si)__m1, (__v2si)__m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed + saturated arithmetic. */ +static __inline __m64 +_mm_adds_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddbss ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed + saturated arithmetic. */ +static __inline __m64 +_mm_adds_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddhss ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2 using signed + saturated arithmetic. 
*/ +static __inline __m64 +_mm_adds_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddwss ((__v2si)__m1, (__v2si)__m2); +} + +/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned + saturated arithmetic. */ +static __inline __m64 +_mm_adds_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddbus ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned + saturated arithmetic. */ +static __inline __m64 +_mm_adds_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddhus ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Add the 32-bit values in M1 to the 32-bit values in M2 using unsigned + saturated arithmetic. */ +static __inline __m64 +_mm_adds_pu32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_waddwus ((__v2si)__m1, (__v2si)__m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */ +static __inline __m64 +_mm_sub_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubb ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */ +static __inline __m64 +_mm_sub_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubh ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */ +static __inline __m64 +_mm_sub_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubw ((__v2si)__m1, (__v2si)__m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed + saturating arithmetic. */ +static __inline __m64 +_mm_subs_pi8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubbss ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + signed saturating arithmetic. */ +static __inline __m64 +_mm_subs_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubhss ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using + signed saturating arithmetic. */ +static __inline __m64 +_mm_subs_pi32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubwss ((__v2si)__m1, (__v2si)__m2); +} + +/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using + unsigned saturating arithmetic. */ +static __inline __m64 +_mm_subs_pu8 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubbus ((__v8qi)__m1, (__v8qi)__m2); +} + +/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using + unsigned saturating arithmetic. */ +static __inline __m64 +_mm_subs_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubhus ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using + unsigned saturating arithmetic. */ +static __inline __m64 +_mm_subs_pu32 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wsubwus ((__v2si)__m1, (__v2si)__m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing + four 32-bit intermediate results, which are then summed by pairs to + produce two 32-bit results. */ +static __inline __m64 +_mm_madd_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wmadds ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing + four 32-bit intermediate results, which are then summed by pairs to + produce two 32-bit results. 
*/ +static __inline __m64 +_mm_madd_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wmaddu ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in + M2 and produce the high 16 bits of the 32-bit results. */ +static __inline __m64 +_mm_mulhi_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wmulsm ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values in + M2 and produce the high 16 bits of the 32-bit results. */ +static __inline __m64 +_mm_mulhi_pu16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wmulum ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce + the low 16 bits of the results. */ +static __inline __m64 +_mm_mullo_pi16 (__m64 __m1, __m64 __m2) +{ + return (__m64) __builtin_arm_wmulul ((__v4hi)__m1, (__v4hi)__m2); +} + +/* Shift four 16-bit values in M left by COUNT. */ +static __inline __m64 +_mm_sll_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_arm_wsllh ((__v4hi)__m, __count); +} + +static __inline __m64 +_mm_slli_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_arm_wsllhi ((__v4hi)__m, __count); +} + +/* Shift two 32-bit values in M left by COUNT. */ +static __inline __m64 +_mm_sll_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_arm_wsllw ((__v2si)__m, __count); +} + +static __inline __m64 +_mm_slli_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_arm_wsllwi ((__v2si)__m, __count); +} + +/* Shift the 64-bit value in M left by COUNT. */ +static __inline __m64 +_mm_sll_si64 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_arm_wslld (__m, __count); +} + +static __inline __m64 +_mm_slli_si64 (__m64 __m, int __count) +{ + return (__m64) __builtin_arm_wslldi (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */ +static __inline __m64 +_mm_sra_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_arm_wsrah ((__v4hi)__m, __count); +} + +static __inline __m64 +_mm_srai_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_arm_wsrahi ((__v4hi)__m, __count); +} + +/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */ +static __inline __m64 +_mm_sra_pi32 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_arm_wsraw ((__v2si)__m, __count); +} + +static __inline __m64 +_mm_srai_pi32 (__m64 __m, int __count) +{ + return (__m64) __builtin_arm_wsrawi ((__v2si)__m, __count); +} + +/* Shift the 64-bit value in M right by COUNT; shift in the sign bit. */ +static __inline __m64 +_mm_sra_si64 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_arm_wsrad (__m, __count); +} + +static __inline __m64 +_mm_srai_si64 (__m64 __m, int __count) +{ + return (__m64) __builtin_arm_wsradi (__m, __count); +} + +/* Shift four 16-bit values in M right by COUNT; shift in zeros. */ +static __inline __m64 +_mm_srl_pi16 (__m64 __m, __m64 __count) +{ + return (__m64) __builtin_arm_wsrlh ((__v4hi)__m, __count); +} + +static __inline __m64 +_mm_srli_pi16 (__m64 __m, int __count) +{ + return (__m64) __builtin_arm_wsrlhi ((__v4hi)__m, __count); +} + +
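/* A minimal usage sketch, not part of the header itself: each shift comes
   in a pair, the _mm_s*_* form taking the count in an __m64 register and
   the _mm_s*i_* form taking an immediate int; arithmetic right shifts
   replicate the sign bit while logical ones insert zeros.  Assumes a
   build with -march=iwmmxt.  */
#include <mmintrin.h>

/* Per-lane (a + b) >> 1 on four signed 16-bit lanes; the arithmetic shift
   keeps the sign, so negative sums round toward minus infinity.  Lane
   overflow in the addition is ignored for brevity.  */
__m64 halving_add_pi16 (__m64 a, __m64 b)
{
  return _mm_srai_pi16 (_mm_add_pi16 (a, b), 1);
}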
+/* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
+static __inline __m64
+_mm_srl_pi32 (__m64 __m, __m64 __count)
+{
+  return (__m64) __builtin_arm_wsrlw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi32 (__m64 __m, int __count)
+{
+  return (__m64) __builtin_arm_wsrlwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
+static __inline __m64
+_mm_srl_si64 (__m64 __m, __m64 __count)
+{
+  return (__m64) __builtin_arm_wsrld (__m, __count);
+}
+
+static __inline __m64
+_mm_srli_si64 (__m64 __m, int __count)
+{
+  return (__m64) __builtin_arm_wsrldi (__m, __count);
+}
+
+/* Rotate four 16-bit values in M right by COUNT.  */
+static __inline __m64
+_mm_ror_pi16 (__m64 __m, __m64 __count)
+{
+  return (__m64) __builtin_arm_wrorh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi16 (__m64 __m, int __count)
+{
+  return (__m64) __builtin_arm_wrorhi ((__v4hi)__m, __count);
+}
+
+/* Rotate two 32-bit values in M right by COUNT.  */
+static __inline __m64
+_mm_ror_pi32 (__m64 __m, __m64 __count)
+{
+  return (__m64) __builtin_arm_wrorw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi32 (__m64 __m, int __count)
+{
+  return (__m64) __builtin_arm_wrorwi ((__v2si)__m, __count);
+}
+
+/* Rotate the 64-bit value in M right by COUNT.  */
+static __inline __m64
+_mm_ror_si64 (__m64 __m, __m64 __count)
+{
+  return (__m64) __builtin_arm_wrord (__m, __count);
+}
+
+static __inline __m64
+_mm_rori_si64 (__m64 __m, int __count)
+{
+  return (__m64) __builtin_arm_wrordi (__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2.  */
+static __inline __m64
+_mm_and_si64 (__m64 __m1, __m64 __m2)
+{
+  return __builtin_arm_wand (__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+   64-bit value in M2.  */
+static __inline __m64
+_mm_andnot_si64 (__m64 __m1, __m64 __m2)
+{
+  return __builtin_arm_wandn (__m2, __m1);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
+static __inline __m64
+_mm_or_si64 (__m64 __m1, __m64 __m2)
+{
+  return __builtin_arm_wor (__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
+static __inline __m64
+_mm_xor_si64 (__m64 __m1, __m64 __m2)
+{
+  return __builtin_arm_wxor (__m1, __m2);
+}
+
+/* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
+   test is true and zero if false.  */
+static __inline __m64
+_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpgtsb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu8 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpgtub ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
+   the test is true and zero if false.  */
+static __inline __m64
+_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpeqh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpgtsh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu16 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpgtuh ((__v4hi)__m1, (__v4hi)__m2);
+}
+/* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF if
+   the test is true and zero if false.  */
+static __inline __m64
+_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpeqw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpgtsw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu32 (__m64 __m1, __m64 __m2)
+{
+  return (__m64) __builtin_arm_wcmpgtuw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Element-wise multiplication of unsigned 16-bit values __B and __C,
+   followed by accumulation across all elements, added to __A.  */
+static __inline __m64
+_mm_mac_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+  return __builtin_arm_wmacu (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of signed 16-bit values __B and __C,
+   followed by accumulation across all elements, added to __A.  */
+static __inline __m64
+_mm_mac_pi16 (__m64 __A, __m64 __B, __m64 __C)
+{
+  return __builtin_arm_wmacs (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of unsigned 16-bit values __A and __B,
+   followed by accumulation across all elements.  */
+static __inline __m64
+_mm_macz_pu16 (__m64 __A, __m64 __B)
+{
+  return __builtin_arm_wmacuz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Element-wise multiplication of signed 16-bit values __A and __B,
+   followed by accumulation across all elements.  */
+static __inline __m64
+_mm_macz_pi16 (__m64 __A, __m64 __B)
+{
+  return __builtin_arm_wmacsz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Accumulate across all unsigned 8-bit values in __A.  */
+static __inline __m64
+_mm_acc_pu8 (__m64 __A)
+{
+  return __builtin_arm_waccb ((__v8qi)__A);
+}
+
+/* Accumulate across all unsigned 16-bit values in __A.  */
+static __inline __m64
+_mm_acc_pu16 (__m64 __A)
+{
+  return __builtin_arm_wacch ((__v4hi)__A);
+}
+
+/* Accumulate across all unsigned 32-bit values in __A.  */
+static __inline __m64
+_mm_acc_pu32 (__m64 __A)
+{
+  return __builtin_arm_waccw ((__v2si)__A);
+}
+
+/* Multiply the 32-bit values __B and __C and accumulate the 64-bit product
+   into __A.  */
+static __inline __m64
+_mm_mia_si64 (__m64 __A, int __B, int __C)
+{
+  return __builtin_arm_tmia (__A, __B, __C);
+}
+
+/* Multiply the 16-bit halves of __B and __C pairwise and accumulate both
+   products into __A.  */
+static __inline __m64
+_mm_miaph_si64 (__m64 __A, int __B, int __C)
+{
+  return __builtin_arm_tmiaph (__A, __B, __C);
+}
+
+/* The bb/bt/tb/tt variants multiply the selected 16-bit half of __B
+   (b = bottom, t = top) by the selected 16-bit half of __C and accumulate
+   the product into __A.  */
+static __inline __m64
+_mm_miabb_si64 (__m64 __A, int __B, int __C)
+{
+  return __builtin_arm_tmiabb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabt_si64 (__m64 __A, int __B, int __C)
+{
+  return __builtin_arm_tmiabt (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatb_si64 (__m64 __A, int __B, int __C)
+{
+  return __builtin_arm_tmiatb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatt_si64 (__m64 __A, int __B, int __C)
+{
+  return __builtin_arm_tmiatt (__A, __B, __C);
+}
+
+/* Extract one of the elements of A and sign extend.  The selector N must
+   be immediate.  */
+#define _mm_extract_pi8(A, N) __builtin_arm_textrmsb ((__v8qi)(A), (N))
+#define _mm_extract_pi16(A, N) __builtin_arm_textrmsh ((__v4hi)(A), (N))
+#define _mm_extract_pi32(A, N) __builtin_arm_textrmsw ((__v2si)(A), (N))
+
+/* Extract one of the elements of A and zero extend.  The selector N must
+   be immediate.  */
+#define _mm_extract_pu8(A, N) __builtin_arm_textrmub ((__v8qi)(A), (N))
+#define _mm_extract_pu16(A, N) __builtin_arm_textrmuh ((__v4hi)(A), (N))
+#define _mm_extract_pu32(A, N) __builtin_arm_textrmuw ((__v2si)(A), (N))
+/* Inserts word D into one of the elements of A.  The selector N must be
+   immediate.  */
+#define _mm_insert_pi8(A, D, N) \
+  ((__m64) __builtin_arm_tinsrb ((__v8qi)(A), (D), (N)))
+#define _mm_insert_pi16(A, D, N) \
+  ((__m64) __builtin_arm_tinsrh ((__v4hi)(A), (D), (N)))
+#define _mm_insert_pi32(A, D, N) \
+  ((__m64) __builtin_arm_tinsrw ((__v2si)(A), (D), (N)))
+
+/* Compute the element-wise maximum of signed 8-bit values.  */
+static __inline __m64
+_mm_max_pi8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wmaxsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of signed 16-bit values.  */
+static __inline __m64
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wmaxsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of signed 32-bit values.  */
+static __inline __m64
+_mm_max_pi32 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wmaxsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values.  */
+static __inline __m64
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wmaxub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 16-bit values.  */
+static __inline __m64
+_mm_max_pu16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wmaxuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 32-bit values.  */
+static __inline __m64
+_mm_max_pu32 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wmaxuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of signed 8-bit values.  */
+static __inline __m64
+_mm_min_pi8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wminsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values.  */
+static __inline __m64
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wminsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of signed 32-bit values.  */
+static __inline __m64
+_mm_min_pi32 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wminsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values.  */
+static __inline __m64
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wminub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 16-bit values.  */
+static __inline __m64
+_mm_min_pu16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wminuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 32-bit values.  */
+static __inline __m64
+_mm_min_pu32 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wminuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values.  */
+static __inline int
+_mm_movemask_pi8 (__m64 __A)
+{
+  return __builtin_arm_tmovmskb ((__v8qi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 16-bit values.  */
+static __inline int
+_mm_movemask_pi16 (__m64 __A)
+{
+  return __builtin_arm_tmovmskh ((__v4hi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 32-bit values.  */
+static __inline int
+_mm_movemask_pi32 (__m64 __A)
+{
+  return __builtin_arm_tmovmskw ((__v2si)__A);
+}
+
+/* Return a combination of the four 16-bit values in A.  The selector
+   must be an immediate.  */
+#define _mm_shuffle_pi16(A, N) \
+  ((__m64) __builtin_arm_wshufh ((__v4hi)(A), (N)))
+/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
+static __inline __m64
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wavg2br ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
+static __inline __m64
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wavg2hr ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the averages of the unsigned 8-bit values in A and B.  */
+static __inline __m64
+_mm_avg2_pu8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wavg2b ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the averages of the unsigned 16-bit values in A and B.  */
+static __inline __m64
+_mm_avg2_pu16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wavg2h ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+   values in A and B.  Return the value in the lower 16-bit word; the
+   upper words are cleared.  */
+static __inline __m64
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Same, but add the sum of absolute differences of __B and __C to the
+   accumulator __A.  */
+static __inline __m64
+_mm_sada_pu8 (__m64 __A, __m64 __B, __m64 __C)
+{
+  return (__m64) __builtin_arm_wsadb ((__v2si)__A, (__v8qi)__B, (__v8qi)__C);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+   values in A and B.  Return the value in the lower 32-bit word; the
+   upper words are cleared.  */
+static __inline __m64
+_mm_sad_pu16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Same, but add the sum of absolute differences of __B and __C to the
+   accumulator __A.  */
+static __inline __m64
+_mm_sada_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+  return (__m64) __builtin_arm_wsadh ((__v2si)__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+   values in A and B.  Return the value in the lower 16-bit word; the
+   upper words are cleared.  */
+static __inline __m64
+_mm_sadz_pu8 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+   values in A and B.  Return the value in the lower 32-bit word; the
+   upper words are cleared.  */
+static __inline __m64
+_mm_sadz_pu16 (__m64 __A, __m64 __B)
+{
+  return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
+#define _mm_align_si64(__A, __B, N) \
+  (__m64) __builtin_arm_walign ((__v8qi) (__A), (__v8qi) (__B), (N))
+
+/* Creates a 64-bit zero.  */
+static __inline __m64
+_mm_setzero_si64 (void)
+{
+  return __builtin_arm_wzero ();
+}
+/* Set and Get arbitrary iWMMXt Control registers.
+   Note only registers 0-3 and 8-11 are currently defined,
+   the rest are reserved.  */
+
+static __inline void
+_mm_setwcx (const int __value, const int __regno)
+{
+  switch (__regno)
+    {
+    case 0:
+      __asm __volatile ("tmcr wcid, %0" :: "r"(__value));
+      break;
+    case 1:
+      __asm __volatile ("tmcr wcon, %0" :: "r"(__value));
+      break;
+    case 2:
+      __asm __volatile ("tmcr wcssf, %0" :: "r"(__value));
+      break;
+    case 3:
+      __asm __volatile ("tmcr wcasf, %0" :: "r"(__value));
+      break;
+    case 8:
+      __builtin_arm_setwcgr0 (__value);
+      break;
+    case 9:
+      __builtin_arm_setwcgr1 (__value);
+      break;
+    case 10:
+      __builtin_arm_setwcgr2 (__value);
+      break;
+    case 11:
+      __builtin_arm_setwcgr3 (__value);
+      break;
+    default:
+      break;
+    }
+}
+
+static __inline int
+_mm_getwcx (const int __regno)
+{
+  /* Initialized so that the reserved registers do not return an
+     indeterminate value.  */
+  int __value = 0;
+  switch (__regno)
+    {
+    case 0:
+      __asm __volatile ("tmrc %0, wcid" : "=r"(__value));
+      break;
+    case 1:
+      __asm __volatile ("tmrc %0, wcon" : "=r"(__value));
+      break;
+    case 2:
+      __asm __volatile ("tmrc %0, wcssf" : "=r"(__value));
+      break;
+    case 3:
+      __asm __volatile ("tmrc %0, wcasf" : "=r"(__value));
+      break;
+    case 8:
+      return __builtin_arm_getwcgr0 ();
+    case 9:
+      return __builtin_arm_getwcgr1 ();
+    case 10:
+      return __builtin_arm_getwcgr2 ();
+    case 11:
+      return __builtin_arm_getwcgr3 ();
+    default:
+      break;
+    }
+  return __value;
+}
+
+/* Creates a vector of two 32-bit values; I0 is least significant.  */
+static __inline __m64
+_mm_set_pi32 (int __i1, int __i0)
+{
+  union
+    {
+      __m64 __q;
+      struct
+	{
+	  unsigned int __i0;
+	  unsigned int __i1;
+	} __s;
+    } __u;
+
+  __u.__s.__i0 = __i0;
+  __u.__s.__i1 = __i1;
+
+  return __u.__q;
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant.  */
+static __inline __m64
+_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+{
+  unsigned int __i1 = (unsigned short) __w3 << 16 | (unsigned short) __w2;
+  unsigned int __i0 = (unsigned short) __w1 << 16 | (unsigned short) __w0;
+
+  return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant.  */
+static __inline __m64
+_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+	     char __b3, char __b2, char __b1, char __b0)
+{
+  unsigned int __i1, __i0;
+
+  __i1 = (unsigned char)__b7;
+  __i1 = __i1 << 8 | (unsigned char)__b6;
+  __i1 = __i1 << 8 | (unsigned char)__b5;
+  __i1 = __i1 << 8 | (unsigned char)__b4;
+
+  __i0 = (unsigned char)__b3;
+  __i0 = __i0 << 8 | (unsigned char)__b2;
+  __i0 = __i0 << 8 | (unsigned char)__b1;
+  __i0 = __i0 << 8 | (unsigned char)__b0;
+
+  return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Similar, but with the arguments in reverse order.  */
+static __inline __m64
+_mm_setr_pi32 (int __i0, int __i1)
+{
+  return _mm_set_pi32 (__i1, __i0);
+}
+
+static __inline __m64
+_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+{
+  return _mm_set_pi16 (__w3, __w2, __w1, __w0);
+}
+
+static __inline __m64
+_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+	      char __b4, char __b5, char __b6, char __b7)
+{
+  return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I.  */
+static __inline __m64
+_mm_set1_pi32 (int __i)
+{
+  return _mm_set_pi32 (__i, __i);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W.  */
+static __inline __m64
+_mm_set1_pi16 (short __w)
+{
+  unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w;
+  return _mm_set1_pi32 (__i);
+}
+/* Creates a vector of eight 8-bit values, all elements containing B.  */
+static __inline __m64
+_mm_set1_pi8 (char __b)
+{
+  unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b;
+  unsigned int __i = __w << 16 | __w;
+  return _mm_set1_pi32 (__i);
+}
+
+#ifdef __IWMMXT2__
+static __inline __m64
+_mm_abs_pi8 (__m64 m1)
+{
+  return (__m64) __builtin_arm_wabsb ((__v8qi)m1);
+}
+
+static __inline __m64
+_mm_abs_pi16 (__m64 m1)
+{
+  return (__m64) __builtin_arm_wabsh ((__v4hi)m1);
+}
+
+static __inline __m64
+_mm_abs_pi32 (__m64 m1)
+{
+  return (__m64) __builtin_arm_wabsw ((__v2si)m1);
+}
+
+static __inline __m64
+_mm_addsubhx_pi16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_waddsubhx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu8 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wabsdiffb ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wabsdiffh ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wabsdiffw ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_addc_pu16 (__m64 a, __m64 b)
+{
+  __m64 result;
+  __asm__ __volatile__ ("waddhc %0, %1, %2" : "=y" (result) : "y" (a), "y" (b));
+  return result;
+}
+
+static __inline __m64
+_mm_addc_pu32 (__m64 a, __m64 b)
+{
+  __m64 result;
+  __asm__ __volatile__ ("waddwc %0, %1, %2" : "=y" (result) : "y" (a), "y" (b));
+  return result;
+}
+
+static __inline __m64
+_mm_avg4_pu8 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wavg4 ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_avg4r_pu8 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wavg4r ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_maddx_pi16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmaddsx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_maddx_pu16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmaddux ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_msub_pi16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmaddsn ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_msub_pu16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmaddun ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhi_pi32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmulwsm ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhi_pu32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmulwum ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhir_pi16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmulsmr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhir_pi32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmulwsmr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhir_pu16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmulumr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhir_pu32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmulwumr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mullo_pi32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wmulwl ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_qmulm_pi16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wqmulm ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_qmulm_pi32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wqmulwm ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_qmulmr_pi16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wqmulmr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_qmulmr_pi32 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wqmulwmr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_subaddhx_pi16 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_wsubaddhx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_addbhusl_pu8 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_waddbhusl ((__v4hi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_addbhusm_pu8 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_waddbhusm ((__v4hi)a, (__v8qi)b);
+}
+
+#define _mm_qmiabb_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiabb ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_qmiabbn_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiabbn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_qmiabt_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiabt ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_qmiabtn_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiabtn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_qmiatb_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiatb ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_qmiatbn_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiatbn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_qmiatt_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiatt ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_qmiattn_pi32(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wqmiattn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiabb_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiabb (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiabbn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiabbn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiabt_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiabt (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiabtn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiabtn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiatb_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiatb (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiatbn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiatbn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiatt_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiatt (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiattn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiattn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawbb_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawbb (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawbbn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawbbn (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawbt_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawbt (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawbtn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawbtn (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawtb_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawtb (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawtbn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawtbn (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawtt_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawtt (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+#define _mm_wmiawttn_si64(acc, m1, m2) \
+  ({\
+   __m64 _acc = acc;\
+   __m64 _m1 = m1;\
+   __m64 _m2 = m2;\
+   _acc = (__m64) __builtin_arm_wmiawttn (_acc, (__v2si)_m1, (__v2si)_m2);\
+   _acc;\
+   })
+
+/* The third argument, N, must be an immediate.  */
+#define _mm_merge_si64(a, b, n) \
+  ({\
+   __m64 result;\
+   result = (__m64) __builtin_arm_wmerge ((__m64) (a), (__m64) (b), (n));\
+   result;\
+   })
+#endif  /* __IWMMXT2__ */
+
+/* As _mm_align_si64, but take the byte shift count at run time from the
+   general-purpose control register wCGR0 ... wCGR3.  */
+static __inline __m64
+_mm_alignr0_si64 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_walignr0 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr1_si64 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_walignr1 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr2_si64 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_walignr2 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr3_si64 (__m64 a, __m64 b)
+{
+  return (__m64) __builtin_arm_walignr3 ((__v8qi) a, (__v8qi) b);
+}
+
+/* AND together the per-field SIMD condition flags in wCASF and set the ARM
+   condition flags from the result.  */
+static __inline void
+_mm_tandcb ()
+{
+  __asm __volatile ("tandcb r15");
+}
+
+static __inline void
+_mm_tandch ()
+{
+  __asm __volatile ("tandch r15");
+}
+
+static __inline void
+_mm_tandcw ()
+{
+  __asm __volatile ("tandcw r15");
+}
+
+/* Copy the SIMD condition flags for field N of wCASF to the ARM condition
+   flags; N must be an immediate.  */
+#define _mm_textrcb(n) \
+  ({\
+   __asm__ __volatile__ (\
+     "textrcb r15, %0" : : "i" (n));\
+   })
+
+#define _mm_textrch(n) \
+  ({\
+   __asm__ __volatile__ (\
+     "textrch r15, %0" : : "i" (n));\
+   })
+
+#define _mm_textrcw(n) \
+  ({\
+   __asm__ __volatile__ (\
+     "textrcw r15, %0" : : "i" (n));\
+   })
+
+/* OR together the per-field SIMD condition flags in wCASF and set the ARM
+   condition flags from the result.  */
+static __inline void
+_mm_torcb ()
+{
+  __asm __volatile ("torcb r15");
+}
+
+static __inline void
+_mm_torch ()
+{
+  __asm __volatile ("torch r15");
+}
+
+static __inline void
+_mm_torcw ()
+{
+  __asm __volatile ("torcw r15");
+}
+
+#ifdef __IWMMXT2__
+/* OR together the per-field saturation flags in wCSSF and set the ARM
+   condition flags from the result.  */
+static __inline void
+_mm_torvscb ()
+{
+  __asm __volatile ("torvscb r15");
+}
+
+static __inline void
+_mm_torvsch ()
+{
+  __asm __volatile ("torvsch r15");
+}
+
+static __inline void
+_mm_torvscw ()
+{
+  __asm __volatile ("torvscw r15");
+}
+#endif /* __IWMMXT2__ */
+
+static __inline __m64
+_mm_tbcst_pi8 (int value)
+{
+  return (__m64) __builtin_arm_tbcstb ((signed char) value);
+}
+
+static __inline __m64
+_mm_tbcst_pi16 (int value)
+{
+  return (__m64) __builtin_arm_tbcsth ((short) value);
+}
+
+static __inline __m64
+_mm_tbcst_pi32 (int value)
+{
+  return (__m64) __builtin_arm_tbcstw (value);
+}
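+
+/* Illustrative sketch, not part of the original header: tbcst broadcasts an
+   ARM core register into every lane, comparable to _mm_set1_pi16 but built
+   directly from a register.  Hypothetical helper, guarded out of
+   compilation.  */
+#if 0
+static __inline __m64
+__example_broadcast (int __v)
+{
+  return _mm_tbcst_pi16 (__v);  /* all four lanes hold (short) __v */
+}
+#endif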
+
+#define _m_empty _mm_empty
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_packusdw _mm_packs_pu32
+#define _m_packssqd _mm_packs_pi64
+#define _m_packusqd _mm_packs_pu64
+#define _mm_packs_si64 _mm_packs_pi64
+#define _mm_packs_su64 _mm_packs_pu64
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_punpckehsbw _mm_unpackeh_pi8
+#define _m_punpckehswd _mm_unpackeh_pi16
+#define _m_punpckehsdq _mm_unpackeh_pi32
+#define _m_punpckehubw _mm_unpackeh_pu8
+#define _m_punpckehuwd _mm_unpackeh_pu16
+#define _m_punpckehudq _mm_unpackeh_pu32
+#define _m_punpckelsbw _mm_unpackel_pi8
+#define _m_punpckelswd _mm_unpackel_pi16
+#define _m_punpckelsdq _mm_unpackel_pi32
+#define _m_punpckelubw _mm_unpackel_pu8
+#define _m_punpckeluwd _mm_unpackel_pu16
+#define _m_punpckeludq _mm_unpackel_pu32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddsd _mm_adds_pi32
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_paddusd _mm_adds_pu32
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubuw _mm_subs_pi32
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_psubusd _mm_subs_pu32
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmadduwd _mm_madd_pu16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_pmacsw _mm_mac_pi16
+#define _m_pmacuw _mm_mac_pu16
+#define _m_pmacszw _mm_macz_pi16
+#define _m_pmacuzw _mm_macz_pu16
+#define _m_paccb _mm_acc_pu8
+#define _m_paccw _mm_acc_pu16
+#define _m_paccd _mm_acc_pu32
+#define _m_pmia _mm_mia_si64
+#define _m_pmiaph _mm_miaph_si64
+#define _m_pmiabb _mm_miabb_si64
+#define _m_pmiabt _mm_miabt_si64
+#define _m_pmiatb _mm_miatb_si64
+#define _m_pmiatt _mm_miatt_si64
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psraq _mm_sra_si64
+#define _m_psraqi _mm_srai_si64
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_prorw _mm_ror_pi16
+#define _m_prorwi _mm_rori_pi16
+#define _m_prord _mm_ror_pi32
+#define _m_prordi _mm_rori_pi32
+#define _m_prorq _mm_ror_si64
+#define _m_prorqi _mm_rori_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtub _mm_cmpgt_pu8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtuw _mm_cmpgt_pu16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+#define _m_pcmpgtud _mm_cmpgt_pu32
+#define _m_pextrb _mm_extract_pi8
+#define _m_pextrw _mm_extract_pi16
+#define _m_pextrd _mm_extract_pi32
+#define _m_pextrub _mm_extract_pu8
+#define _m_pextruw _mm_extract_pu16
+#define _m_pextrud _mm_extract_pu32
+#define _m_pinsrb _mm_insert_pi8
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pinsrd _mm_insert_pi32
+#define _m_pmaxsb _mm_max_pi8
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxsd _mm_max_pi32
+#define _m_pmaxub _mm_max_pu8
+#define _m_pmaxuw _mm_max_pu16
+#define _m_pmaxud _mm_max_pu32
+#define _m_pminsb _mm_min_pi8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminsd _mm_min_pi32
+#define _m_pminub _mm_min_pu8
+#define _m_pminuw _mm_min_pu16
+#define _m_pminud _mm_min_pu32
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmovmskw _mm_movemask_pi16
+#define _m_pmovmskd _mm_movemask_pi32
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_pavg2b _mm_avg2_pu8
+#define _m_pavg2w _mm_avg2_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_psadwd _mm_sad_pu16
+#define _m_psadzbw _mm_sadz_pu8
+#define _m_psadzwd _mm_sadz_pu16
+#define _m_paligniq _mm_align_si64
+#define _m_cvt_si2pi _mm_cvtsi64_m64
+#define _m_cvt_pi2si _mm_cvtm64_si64
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_to_int _mm_cvtsi64_si32
+
+#if defined __cplusplus
+}; /* End "C" */
+#endif /* __cplusplus */
+
+#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/NXConstStr.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/NXConstStr.h
new file mode 100644
index 0000000..acc42d6
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/NXConstStr.h
@@ -0,0 +1,51 @@
+/* Interface for the NXConstantString class for Objective-C.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   Contributed by Pieter J. Schoenmakers
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.
*/ + + +#ifndef __nxconstantstring_INCLUDE_GNU +#define __nxconstantstring_INCLUDE_GNU + +#include "Object.h" + +#ifdef __cplusplus +extern "C" { +#endif + +@interface NXConstantString: Object +{ + char *c_string; + unsigned int len; +} + +-(const char *) cString; +-(unsigned int) length; + +@end + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Object.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Object.h new file mode 100644 index 0000000..a6e85dd --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Object.h @@ -0,0 +1,62 @@ +/* Interface for the Object class for Objective-C. + Copyright (C) 1993-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + + +#ifndef __object_INCLUDE_GNU +#define __object_INCLUDE_GNU + +#include "objc.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* The Object class is a very minimal root class included with the + runtime. It is used as superclass for the two classes included + with the runtime, Protocol and NXConstantString. + + Because Objective-C allows multiple root classes, you can define + your own root class, different from Object. + + In particular, a Foundation library (such as GNUstep Base) is + expected to provide its own root class (typically called NSObject), + fully integrated with the library's own high-level features. It is + expected that you should always use and interact with NSObject, and + mostly ignore Object. */ + +/* All classes are derived from Object. As such, this is the overhead + tacked onto those objects. */ +@interface Object +{ + Class isa; /* A pointer to the instance's class structure. */ +} +- (Class)class; +- (BOOL)isEqual: (id)anObject; +@end + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Protocol.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Protocol.h new file mode 100644 index 0000000..50c3838 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/Protocol.h @@ -0,0 +1,54 @@ +/* Declare the class Protocol for Objective C programs. + Copyright (C) 1993-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + + +#ifndef __Protocol_INCLUDE_GNU +#define __Protocol_INCLUDE_GNU + +#include "Object.h" + +#ifdef __cplusplus +extern "C" { +#endif + +@interface Protocol : Object +{ +@private + char *protocol_name; + struct objc_protocol_list *protocol_list; + struct objc_method_description_list *instance_methods, *class_methods; +} +@end + +/* The Protocol methods have been replaced by + protocol_getName() + protocol_conformsToProtocol() + protocol_getMethodDescription() +*/ + +#ifdef __cplusplus +} +#endif + +#endif /* not __Protocol_INCLUDE_GNU */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/message.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/message.h new file mode 100644 index 0000000..6bb5aa7 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/message.h @@ -0,0 +1,119 @@ +/* GNU Objective C Runtime messaging declarations + Copyright (C) 1993-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef __objc_message_INCLUDE_GNU +#define __objc_message_INCLUDE_GNU + +#include "objc.h" +#include "objc-decls.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file includes declarations of the messaging functions and + types. */ + +/* Compatibility note: the messaging function is one area where the + GNU runtime and the Apple/NeXT runtime differ significantly. If + you can, it is recommended that you use higher-level facilities + (provided by a Foundation library such as GNUstep Base) to perform + forwarding or other advanced messaging tricks. */ + +/* This function returns the IMP (C function implementing a method) to + use to invoke the method with selector 'op' of receiver 'receiver'. + + This is the function used by the compiler when compiling method + invocations with the GNU runtime. For example, the method call + + result = [receiver method]; + + is compiled by the compiler (with the GNU runtime) into the + equivalent of: + + { + IMP function = objc_msg_lookup (receiver, @selector (method)); + result = function (receiver, @selector (method)); + } + + so, a call to objc_msg_lookup() determines the IMP (the C function + implementing the method) to call. Then, the function is called. 
+ If the method takes or returns different arguments, the compiler + will cast 'function' to the right type before invoking it, making + sure arguments and return value are handled correctly. + + objc_msg_lookup() must always return a valid function that can be + called with the required method signature (otherwise the + compiler-generated code shown above could segfault). If 'receiver' + is NULL, objc_msg_lookup() returns a C function that does nothing, + ignores all its arguments, and returns NULL (see nil_method.c). If + 'receiver' does not respond to the selector 'op', objc_msg_lookup() + will try to call +resolveClassMethod: or resolveInstanceMethod: as + appropriate, and if they return YES, it will try the lookup again + (+resolveClassMethod: and +resolveInstanceMethod: can thus install + dynamically methods as they are requested). If + +resolveClassMethod: or +resolveInstanceMethod: are either not + available, or return NO, or return YES but 'receiver' still doesn't + implement the 'selector' after calling them, the runtime returns a + generic "forwarding" function that can be called with the required + method signature and which can process the method invocation + according to the forwarding API. There are two runtime hooks that + allow Foundation libraries (such as GNUstep-Base) to return their + own forwarding function in preference to the runtime ones. When + that happens, the Foundation library effectively takes complete + control of the forwarding process; any method invocation where the + selector is not implemented by the receiver will end up calling a + forwarding function chosen by the Foundation library. */ +objc_EXPORT IMP objc_msg_lookup (id receiver, SEL op); + +/* Structure used when a message is send to a class's super class. + The compiler generates one of these structures and passes it to + objc_msg_lookup_super() when a [super method] call is compiled. */ + +/* Modern API. */ +struct objc_super +{ + id self; /* The receiver of the message. */ + Class super_class; /* The superclass of the receiver. */ +}; + +/* This is used by the compiler instead of objc_msg_lookup () when + compiling a call to 'super', such as [super method]. This requires + sending a message to super->self, but looking up the method as if + super->self was in class super->super_class. */ +objc_EXPORT IMP objc_msg_lookup_super (struct objc_super *super, SEL sel); + +/* Hooks for method forwarding. They make it easy to substitute the + built-in forwarding with one based on a library, such as ffi, that + implement closures, thereby avoiding gcc's __builtin_apply + problems. __objc_msg_forward2's result will be preferred over that + of __objc_msg_forward if both are set and return non-NULL. */ +objc_EXPORT IMP (*__objc_msg_forward)(SEL); +objc_EXPORT IMP (*__objc_msg_forward2)(id, SEL); + +#ifdef __cplusplus +} +#endif + +#endif /* not __objc_message_INCLUDE_GNU */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-decls.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-decls.h new file mode 100644 index 0000000..3dd1135 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-decls.h @@ -0,0 +1,46 @@ +/* GNU Objective-C Extern helpers for Win32. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. 
+ +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + + +#ifndef __objc_decls_INCLUDE_GNU +#define __objc_decls_INCLUDE_GNU + +#if defined (_WIN32) || defined (__WIN32__) || defined (WIN32) + +# ifdef DLL_EXPORT /* defined by libtool (if required) */ +# define objc_EXPORT +# define objc_DECLARE +# else +# define objc_EXPORT extern __declspec(dllimport) +# define objc_DECLARE extern __declspec(dllimport) +# endif + +#else + +# define objc_EXPORT extern +# define objc_DECLARE + +#endif + +#endif /* __objc_decls_INCLUDE_GNU */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-exception.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-exception.h new file mode 100644 index 0000000..0bec446 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-exception.h @@ -0,0 +1,109 @@ +/* GNU Objective C Runtime native exceptions + Copyright (C) 2010-2017 Free Software Foundation, Inc. + Contributed by Nicola Pero + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef __objc_exception_INCLUDE_GNU +#define __objc_exception_INCLUDE_GNU + +#include "objc.h" +#include "objc-decls.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* 'objc_exception_throw' throws the exception 'exception', which is + an exception object. + + Calls to 'objc_exception_throw' are automatically generated by the + compiler: an Objective-C "@throw exception;" statement gets + compiled into the equivalent of "objc_exception_throw + (exception);". + + 'objc_exception_throw' searches for a @catch() that can catch the + exception. By default, @catch (MyClass object) will catch all + exception objects that are of class MyClass or of a subclass of + MyClass; if the exception object is 'nil', then the exception can + only be caught with a catch-all exception handler where no + exception class is specified (such as @catch(id object)). This + behaviour can be customized by setting an 'objc_exception_matcher' + function (using objc_set_exception_matcher(), see below); if one is + set, it is used instead of the default one. 
+ + If the exception is uncaught (there is no @catch() to catch it), + the program aborts. It is possible to customize this behaviour by + setting an 'objc_uncaught_exception_handler' function (using + objc_set_uncaught_exception_handler(), see below); if one is set, + it is executed before abort() is called. An uncaught exception + handler is expected to never return. */ +objc_EXPORT void objc_exception_throw (id exception); + +/* Compatibility note: the Apple/NeXT runtime seems to also have + objc_exception_rethrow(), objc_begin_catch() and objc_end_catch(). + Currently the GNU runtime does not use them. */ + +/* The following functions allow customizing to a certain extent the + exception handling. They are not thread safe and should be called + during the program initialization before threads are started. They + are mostly reserved for "Foundation" libraries; in the case of + GNUstep, GNUstep Base may be using these functions to improve the + standard exception handling. You probably shouldn't use these + functions unless you are writing your own Foundation library. */ + +/* Compatibility note: objc_set_exception_preprocessor() (available on + the Apple/NeXT runtime) is not available on the GNU runtime. */ + +/* An 'objc_exception_matcher' function is used to match an exception + to a @catch clause. 'catch_class' is the class of objects caught + by the @catch clause (for example, in "@catch (Object *o)", the + catch_class is Object). It should return 1 if the exception should + be caught by a @catch with a catch_class argument, and 0 if + not. */ +typedef int (*objc_exception_matcher)(Class catch_class, id exception); + +/* Sets a new exception matcher function, and returns the previous + exception matcher function. This function is not safe to call in a + multi-threaded environment because other threads may be trying to + invoke the exception matcher while you change it! */ +objc_EXPORT objc_exception_matcher +objc_setExceptionMatcher (objc_exception_matcher new_matcher); + + +/* An 'objc_uncaught_exception_handler' function is a function that + handles uncaught exceptions. It should never return. */ +typedef void (*objc_uncaught_exception_handler)(id exception); + +/* Sets a new uncaught exception handler function, and returns the + previous exception handler function. This function is not safe to + call in a multi-threaded environment because other threads may be + trying to invoke the uncaught exception handler while you change + it. */ +objc_EXPORT objc_uncaught_exception_handler +objc_setUncaughtExceptionHandler (objc_uncaught_exception_handler new_handler); + +#ifdef __cplusplus +} +#endif + +#endif /* not __objc_exception_INCLUDE_GNU */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-sync.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-sync.h new file mode 100644 index 0000000..8157566 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc-sync.h @@ -0,0 +1,69 @@ +/* GNU Objective C Runtime @synchronized implementation + Copyright (C) 2010-2017 Free Software Foundation, Inc. + Contributed by Nicola Pero + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef __objc_sync_INCLUDE_GNU +#define __objc_sync_INCLUDE_GNU + +#include "objc.h" +#include "objc-decls.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* These functions are automatically called by @synchronized(). */ + +/* 'objc_sync_enter' is automatically called when entering a + @synchronized() block. It locks the recursive lock associated with + 'object'. If 'object' is nil, it does nothing. It returns + OBJC_SYNC_SUCCESS on success; see the enumeration below for error + values. + + Note that you should not rely on the behaviour when 'object' is nil + because it could change. */ +objc_EXPORT int objc_sync_enter (id object); + +/* 'objc_sync_exit' is automatically called when exiting from a + @synchronized() block. It unlocks the recursive lock associated + with 'object'. If 'object' is nil, it does nothing. It returns + OBJC_SYNC_SUCCESS on success; see the enumeration below for error + values. */ +objc_EXPORT int objc_sync_exit (id object); + +/* All the possible return values for objc_sync_enter() and + objc_sync_exit(). + */ +enum { + OBJC_SYNC_SUCCESS = 0, + OBJC_SYNC_NOT_OWNING_THREAD_ERROR = -1, + OBJC_SYNC_TIMED_OUT = -2, + OBJC_SYNC_NOT_INITIALIZED = -3 +}; + +#ifdef __cplusplus +} +#endif + +#endif /* not __objc_sync_INCLUDE_GNU */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc.h new file mode 100644 index 0000000..20fcdb4 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/objc.h @@ -0,0 +1,151 @@ +/* Basic data types for Objective C. + Copyright (C) 1993-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef __objc_INCLUDE_GNU +#define __objc_INCLUDE_GNU + +/* This file contains the definition of the basic types used by the + Objective-C language. It needs to be included to do almost + anything with Objective-C. */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* The current version of the GNU Objective-C Runtime library in + compressed ISO date format. This should be updated any time a new + version is released with changes to the public API (there is no + need to update it if there were no API changes since the previous + release). 
This macro is only defined starting with the GNU + Objective-C Runtime shipped with GCC 4.6.0. If it is not defined, + it is either an older version of the runtime, or another runtime. */ +#define __GNU_LIBOBJC__ 20110608 + +/* Definition of the boolean type. + + Compatibility note: the Apple/NeXT runtime defines a BOOL as a + 'signed char'. The GNU runtime uses an 'unsigned char'. + + Important: this could change and we could switch to 'typedef bool + BOOL' in the future. Do not depend on the type of BOOL. */ +#undef BOOL +typedef unsigned char BOOL; + +#define YES (BOOL)1 +#define NO (BOOL)0 + +/* The basic Objective-C types (SEL, Class, id) are defined as pointer + to opaque structures. The details of the structures are private to + the runtime and may potentially change from one version to the + other. */ + +/* A SEL (selector) represents an abstract method (in the + object-oriented sense) and includes all the details of how to + invoke the method (which means its name, arguments and return + types) but provides no implementation of its own. You can check + whether a class implements a selector or not, and if you have a + selector and know that the class implements it, you can use it to + call the method for an object in the class. */ +typedef const struct objc_selector *SEL; + +/* A Class is a class (in the object-oriented sense). In Objective-C + there is the complication that each Class is an object itself, and + so belongs to a class too. This class that a class belongs to is + called its 'meta class'. */ +typedef struct objc_class *Class; + +/* An 'id' is an object of an unknown class. The way the object data + is stored inside the object is private and what you see here is + only the beginning of the actual struct. The first field is always + a pointer to the Class that the object belongs to. */ +typedef struct objc_object +{ + /* 'class_pointer' is the Class that the object belongs to. In case + of a Class object, this pointer points to the meta class. + + Compatibility Note: The Apple/NeXT runtime calls this field + 'isa'. To access this field, use object_getClass() from + runtime.h, which is an inline function so does not add any + overhead and is also portable to other runtimes. */ + Class class_pointer; +} *id; + +/* 'IMP' is a C function that implements a method. When retrieving + the implementation of a method from the runtime, this is the type + of the pointer returned. The idea of the definition of IMP is to + represent a 'pointer to a general function taking an id, a SEL, + followed by other unspecified arguments'. You must always cast an + IMP to a pointer to a function taking the appropriate, specific + types for that function, before calling it - to make sure the + appropriate arguments are passed to it. The code generated by the + compiler to perform method calls automatically does this cast + inside method calls. */ +typedef id (*IMP)(id, SEL, ...); + +/* 'nil' is the null object. Messages to nil do nothing and always + return 0. */ +#define nil (id)0 + +/* 'Nil' is the null class. Since classes are objects too, this is + actually the same object as 'nil' (and behaves in the same way), + but it has a type of Class, so it is good to use it instead of + 'nil' if you are comparing a Class object to nil as it enables the + compiler to do some type-checking. */ +#define Nil (Class)0 + +/* TODO: Move the 'Protocol' declaration into objc/runtime.h. A + Protocol is simply an object, not a basic Objective-C type. 
The + Apple runtime defines Protocol in objc/runtime.h too, so it's good + to move it there for API compatibility. */ + +/* A 'Protocol' is a formally defined list of selectors (normally + created using the @protocol Objective-C syntax). It is mostly used + at compile-time to check that classes implement all the methods + that they are supposed to. Protocols are also available in the + runtime system as Protocol objects. */ +#ifndef __OBJC__ + /* Once we stop including the deprecated struct_objc_protocol.h + there is no reason to even define a 'struct objc_protocol'. As + all the structure details will be hidden, a Protocol basically is + simply an object (as it should be). */ + typedef struct objc_object Protocol; +#else /* __OBJC__ */ + @class Protocol; +#endif + +/* Compatibility note: the Apple/NeXT runtime defines sel_getName(), + sel_registerName(), object_getClassName(), object_getIndexedIvars() + in this file while the GNU runtime defines them in runtime.h. + + The reason the GNU runtime does not define them here is that they + are not basic Objective-C types (defined in this file), but are + part of the runtime API (defined in runtime.h). */ + +#ifdef __cplusplus +} +#endif + +#endif /* not __objc_INCLUDE_GNU */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/runtime.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/runtime.h new file mode 100644 index 0000000..303569a --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/runtime.h @@ -0,0 +1,1143 @@ +/* GNU Objective-C Runtime API - Modern API + Copyright (C) 2010-2017 Free Software Foundation, Inc. + Contributed by Nicola Pero + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public +License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef __objc_runtime_INCLUDE_GNU +#define __objc_runtime_INCLUDE_GNU + +/* + This file declares the "modern" GNU Objective-C Runtime API. + + This API replaced the "traditional" GNU Objective-C Runtime API + (which used to be declared in objc/objc-api.h) which is the one + supported by older versions of the GNU Objective-C Runtime. The + "modern" API is very similar to the API used by the modern + Apple/NeXT runtime. +*/ +#include "objc.h" +#include "objc-decls.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* An 'Ivar' represents an instance variable. It holds information + about the name, type and offset of the instance variable. */ +typedef struct objc_ivar *Ivar; + +/* A 'Property' represents a property. It holds information about the + name of the property, and its attributes. + + Compatibility Note: the Apple/NeXT runtime defines this as + objc_property_t, so we define it that way as well, but obviously + Property is the right name. 
*/ +typedef struct objc_property *Property; +typedef struct objc_property *objc_property_t; + +/* A 'Method' represents a method. It holds information about the + name, types and the IMP of the method. */ +typedef struct objc_method *Method; + +/* A 'Category' represents a category. It holds information about the + name of the category, the class it belongs to, and the methods, + protocols and such like provided by the category. */ +typedef struct objc_category *Category; + +/* 'Protocol' is defined in objc/objc.h (which is included by this + file). */ + +/* Method descriptor returned by introspective Object methods. At the + moment, this is really just the first part of the more complete + objc_method structure used internally by the runtime. (PS: In the + GNU Objective-C Runtime, selectors already include a type, so an + objc_method_description does not add much to a SEL. But in other + runtimes, that is not the case, which is why + objc_method_description exists). */ +struct objc_method_description +{ + SEL name; /* Selector (name and signature) */ + char *types; /* Type encoding */ +}; + +/* The following are used in encode strings to describe the type of + Ivars and Methods. */ +#define _C_ID '@' +#define _C_CLASS '#' +#define _C_SEL ':' +#define _C_CHR 'c' +#define _C_UCHR 'C' +#define _C_SHT 's' +#define _C_USHT 'S' +#define _C_INT 'i' +#define _C_UINT 'I' +#define _C_LNG 'l' +#define _C_ULNG 'L' +#define _C_LNG_LNG 'q' +#define _C_ULNG_LNG 'Q' +#define _C_FLT 'f' +#define _C_DBL 'd' +#define _C_LNG_DBL 'D' +#define _C_BFLD 'b' +#define _C_BOOL 'B' +#define _C_VOID 'v' +#define _C_UNDEF '?' +#define _C_PTR '^' +#define _C_CHARPTR '*' +#define _C_ARY_B '[' +#define _C_ARY_E ']' +#define _C_UNION_B '(' +#define _C_UNION_E ')' +#define _C_STRUCT_B '{' +#define _C_STRUCT_E '}' +#define _C_VECTOR '!' +#define _C_COMPLEX 'j' + +/* _C_ATOM is never generated by the compiler. You can treat it as + equivalent to "*". */ +#define _C_ATOM '%' + +/* The following are used in encode strings to describe some + qualifiers of method and ivar types. */ +#define _C_CONST 'r' +#define _C_IN 'n' +#define _C_INOUT 'N' +#define _C_OUT 'o' +#define _C_BYCOPY 'O' +#define _C_BYREF 'R' +#define _C_ONEWAY 'V' +#define _C_GCINVISIBLE '|' + +/* The same when used as flags. */ +#define _F_CONST 0x01 +#define _F_IN 0x01 +#define _F_OUT 0x02 +#define _F_INOUT 0x03 +#define _F_BYCOPY 0x04 +#define _F_BYREF 0x08 +#define _F_ONEWAY 0x10 +#define _F_GCINVISIBLE 0x20 + + +/** Implementation: the following functions are defined inline. */ + +/* Return the class of 'object', or Nil if the object is nil. If + 'object' is a class, the meta class is returned; if 'object' is a + meta class, the root meta class is returned (note that this is + different from the traditional GNU Objective-C Runtime API function + object_get_class(), which for a meta class would return the meta + class itself). This function is inline, so it is really fast and + should be used instead of accessing object->class_pointer + directly. */ +static inline Class +object_getClass (id object) +{ + if (object != nil) + return object->class_pointer; + else + return Nil; +} + + +/** Implementation: the following functions are in selector.c. */ + +/* Return the name of a given selector. If 'selector' is NULL, return + "". */ +objc_EXPORT const char *sel_getName (SEL selector); + +/* Return the type of a given selector. Return NULL if selector is + NULL. 
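/* [Editor's note: illustrative examples, not part of the upstream
   header, of encode strings built from the _C_* characters above, as
   the Objective-C compiler's @encode() construct produces them:

     @encode (int)              "i"          (_C_INT)
     @encode (unsigned char)    "C"          (_C_UCHR)
     @encode (id)               "@"          (_C_ID)
     @encode (char *)           "*"          (_C_CHARPTR)
     @encode (int *)            "^i"         (_C_PTR, then the target type)
     @encode (int [4])          "[4i]"       (_C_ARY_B, count, element, _C_ARY_E)
     @encode (struct point)     "{point=ii}" for struct point {int x; int y;}

   Method type encodings concatenate such elements, with the return
   type first, then the implicit self and _cmd arguments, then the
   remaining arguments, possibly decorated with the qualifier
   characters above and, in the GNU runtime, with byte offsets.] */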
+ + Compatibility Note: the Apple/NeXT runtime has untyped selectors, + so it does not have this function, which is specific to the GNU + Runtime. */ +objc_EXPORT const char *sel_getTypeEncoding (SEL selector); + +/* This is the same as sel_registerName (). Please use + sel_registerName () instead. */ +objc_EXPORT SEL sel_getUid (const char *name); + +/* Register a selector with a given name (but unspecified types). If + you know the types, it is better to call sel_registerTypedName(). + If a selector with this name and no types already exists, it is + returned. Note that this function should really be called + 'objc_registerSelector'. Return NULL if 'name' is NULL. */ +objc_EXPORT SEL sel_registerName (const char *name); + +/* Register a selector with a given name and types. If a selector + with this name and types already exists, it is returned. Note that + this function should really be called 'objc_registerTypedSelector', + and it's called 'sel_registerTypedName' only for consistency with + 'sel_registerName'. Return NULL if 'name' is NULL. + + Compatibility Note: the Apple/NeXT runtime has untyped selectors, + so it does not have this function, which is specific to the GNU + Runtime. */ +objc_EXPORT SEL sel_registerTypedName (const char *name, const char *type); + +/* Return YES if first_selector is the same as second_selector, and NO + if not. */ +objc_EXPORT BOOL sel_isEqual (SEL first_selector, SEL second_selector); + +/* Return all the selectors with the supplied name. In the GNU + runtime, selectors are typed and there may be multiple selectors + with the same name but a different type. The return value of the + function is a pointer to an area, allocated with malloc(), that + contains all the selectors with the supplier name known to the + runtime. The list is terminated by NULL. Optionally, if you pass + a non-NULL 'numberOfReturnedSelectors' pointer, the unsigned int + that it points to will be filled with the number of selectors + returned. + + Compatibility Note: the Apple/NeXT runtime has untyped selectors, + so it does not have this function, which is specific to the GNU + Runtime. */ +objc_EXPORT SEL * sel_copyTypedSelectorList (const char *name, + unsigned int *numberOfReturnedSelectors); + +/* Return a selector with name 'name' and a non-zero type encoding, if + there is a single selector with a type, and with that name, + registered with the runtime. If there is no such selector, or if + there are multiple selectors with the same name but conflicting + types, NULL is returned. Return NULL if 'name' is NULL. + + This is useful if you have the name of the selector, and would + really like to get a selector for it that includes the type + encoding. Unfortunately, if the program contains multiple selector + with the same name but different types, sel_getTypedSelector can + not possibly know which one you need, and so will return NULL. + + Compatibility Note: the Apple/NeXT runtime has untyped selectors, + so it does not have this function, which is specific to the GNU + Runtime. */ +objc_EXPORT SEL sel_getTypedSelector (const char *name); + + +/** Implementation: the following functions are in objects.c. */ + +/* Create an instance of class 'class_', adding extraBytes to the size + of the returned object. This method allocates the appropriate + amount of memory for the instance, initializes it to zero, then + calls all the C++ constructors on appropriate C++ instance + variables of the instance (if any) (TODO: The C++ constructors bit + is not implemented yet). 
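/* [Editor's note: a small usage sketch, not part of the upstream
   header; the "count" selector name is hypothetical.] */

#include <assert.h>
#include <stdio.h>

static void
selector_demo (void)
{
  SEL a = sel_registerName ("count");
  SEL b = sel_getUid ("count");        /* same as sel_registerName() */

  assert (sel_isEqual (a, b));
  printf ("registered selector: %s\n", sel_getName (a));
}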
*/ +objc_EXPORT id class_createInstance (Class class_, size_t extraBytes); + +/* Copy an object and return the copy. extraBytes should be identical + to the extraBytes parameter that was passed when creating the + original object. */ +objc_EXPORT id object_copy (id object, size_t extraBytes); + +/* Dispose of an object. This method calls the appropriate C++ + destructors on appropriate C++ instance variables of the instance + (if any) (TODO: This is not implemented yet), then frees the memory + for the instance. */ +objc_EXPORT id object_dispose (id object); + +/* Return the name of the class of 'object'. If 'object' is 'nil', + returns "Nil". */ +objc_EXPORT const char * object_getClassName (id object); + +/* Change the class of object to be class_. Return the previous class + of object. This is currently not really thread-safe. */ +objc_EXPORT Class object_setClass (id object, Class class_); + + +/** Implementation: the following functions are in ivars.c. */ + +/* Return an instance variable given the class and the instance + variable name. This is an expensive function to call, so try to + reuse the returned Ivar if you can. */ +objc_EXPORT Ivar class_getInstanceVariable (Class class_, const char *name); + +/* Return a class variable given the class and the class variable + name. This is an expensive function to call, so try to reuse the + returned Ivar if you can. + + This function always returns NULL since class variables are + currently unavailable in Objective-C. */ +objc_EXPORT Ivar class_getClassVariable (Class class_, const char *name); + +/* If the object was created in class_createInstance() with some + extraBytes, returns a pointer to them. If it was not, then the + returned pointer may make no sense. */ +objc_EXPORT void * object_getIndexedIvars (id object); + +/* Get the value of an instance variable of type 'id'. The function + returns the instance variable. To get the value of the instance + variable, you should pass as 'returnValue' a pointer to an 'id'; + the value will be copied there. Note that 'returnValue' is really + a 'void *', not a 'void **'. This function really works only with + instance variables of type 'id'; for other types of instance + variables, access directly the data at (char *)object + + ivar_getOffset (ivar). */ +objc_EXPORT Ivar object_getInstanceVariable (id object, const char *name, void **returnValue); + +/* Set the value of an instance variable. The value to set is passed + in 'newValue' (which really is an 'id', not a 'void *'). The + function returns the instance variable. This function really works + only with instance variables of type 'id'; for other types of + instance variables, access directly the data at (char *)object + + ivar_getOffset (ivar). */ +objc_EXPORT Ivar object_setInstanceVariable (id object, const char *name, void *newValue); + +/* Get the value of an instance variable of type 'id' of the object + 'object'. This is faster than object_getInstanceVariable if you + already have the instance variable because it avoids the expensive + call to class_getInstanceVariable that is done by + object_getInstanceVariable. */ +objc_EXPORT id object_getIvar (id object, Ivar variable); + +/* Set the value of an instance variable of type 'id' of the object + 'object'. This is faster than object_setInstanceVariable if you + already have the instance variable because it avoids the expensive + call to class_getInstanceVariable that is done by + object_setInstanceVariable. 
*/ +objc_EXPORT void object_setIvar (id object, Ivar variable, id value); + +/* Return the name of the instance variable. Return NULL if + 'variable' is NULL. */ +objc_EXPORT const char * ivar_getName (Ivar variable); + +/* Return the offset of the instance variable from the start of the + object data. Return 0 if 'variable' is NULL. */ +objc_EXPORT ptrdiff_t ivar_getOffset (Ivar variable); + +/* Return the type encoding of the variable. Return NULL if + 'variable' is NULL. */ +objc_EXPORT const char * ivar_getTypeEncoding (Ivar variable); + +/* Return all the instance variables of the class. The return value + of the function is a pointer to an area, allocated with malloc(), + that contains all the instance variables of the class. It does not + include instance variables of superclasses. The list is terminated + by NULL. Optionally, if you pass a non-NULL + 'numberOfReturnedIvars' pointer, the unsigned int that it points to + will be filled with the number of instance variables returned. + Return NULL for classes still in construction (ie, allocated using + objc_allocatedClassPair() but not yet registered with the runtime + using objc_registerClassPair()). */ +objc_EXPORT Ivar * class_copyIvarList (Class class_, unsigned int *numberOfReturnedIvars); + +/* Add an instance variable with name 'ivar_name' to class 'class_', + where 'class_' is a class in construction that has been created + using objc_allocateClassPair() and has not been registered with the + runtime using objc_registerClassPair() yet. You can not add + instance variables to classes already registered with the runtime. + 'size' is the size of the instance variable, 'log_2_of_alignment' + the alignment as a power of 2 (so 0 means alignment to a 1 byte + boundary, 1 means alignment to a 2 byte boundary, 2 means alignment + to a 4 byte boundary, etc), and 'type' the type encoding of the + variable type. You can use sizeof(), log2(__alignof__()) and + @encode() to determine the right 'size', 'alignment' and 'type' for + your instance variable. For example, to add an instance variable + name "my_variable" and of type 'id', you can use: + + class_addIvar (class, "my_variable", sizeof (id), log2 ( __alignof__ (id)), + @encode (id)); + + Return YES if the variable was added, and NO if not. In + particular, return NO if 'class_' is Nil, or a meta-class or a + class not in construction. Return Nil also if 'ivar_name' or + 'type' is NULL, or 'size' is 0. + */ +objc_EXPORT BOOL class_addIvar (Class class_, const char * ivar_name, size_t size, + unsigned char log_2_of_alignment, const char *type); + +/* Return the name of the property. Return NULL if 'property' is + NULL. */ +objc_EXPORT const char * property_getName (Property property); + +/* Return the attributes of the property as a string. Return NULL if + 'property' is NULL. */ +objc_EXPORT const char * property_getAttributes (Property property); + +/* Return the property with name 'propertyName' of the class 'class_'. + This function returns NULL if the required property can not be + found. Return NULL if 'class_' or 'propertyName' is NULL. + + Note that the traditional ABI does not store the list of properties + of a class in a compiled module, so the traditional ABI will always + return NULL. */ +objc_EXPORT Property class_getProperty (Class class_, const char *propertyName); + +/* Return all the properties of the class. The return value + of the function is a pointer to an area, allocated with malloc(), + that contains all the properties of the class. 
It does not + include properties of superclasses. The list is terminated + by NULL. Optionally, if you pass a non-NULL + 'numberOfReturnedIvars' pointer, the unsigned int that it points to + will be filled with the number of properties returned. + + Note that the traditional ABI does not store the list of properties + of a class in a compiled module, so the traditional ABI will always + return an empty list. */ +objc_EXPORT Property * class_copyPropertyList +(Class class_, unsigned int *numberOfReturnedProperties); + +/* Return the ivar layout for class 'class_'. + + At the moment this function always returns NULL. */ +objc_EXPORT const char * class_getIvarLayout (Class class_); + +/* Return the weak ivar layout for class 'class_'. + + At the moment this function always returns NULL. */ +objc_EXPORT const char * class_getWeakIvarLayout (Class class_); + +/* Set the ivar layout for class 'class_'. + + At the moment, this function does nothing. */ +objc_EXPORT void class_setIvarLayout (Class class_, const char *layout); + +/* Set the weak ivar layout for class 'class_'. + + At the moment, this function does nothing. With the GNU runtime, + you should use class_ivar_set_gcinvisible () to hide variables from + the Garbage Collector. */ +objc_EXPORT void class_setWeakIvarLayout (Class class_, const char *layout); + + +/** Implementation: the following functions are in class.c. */ + +/* Compatibility Note: The Apple/NeXT runtime does not have + objc_get_unknown_class_handler and + objc_setGetUnknownClassHandler(). They provide functionality that + the traditional GNU Objective-C Runtime API used to provide via the + _objc_lookup_class hook. */ + +/* An 'objc_get_unknown_class_handler' function is used by + objc_getClass() to get a class that is currently unknown to the + compiler. You could use it for example to have the class loaded by + dynamically loading a library. 'class_name' is the name of the + class. The function should return the Class object if it manages to + load the class, and Nil if not. */ +typedef Class (*objc_get_unknown_class_handler)(const char *class_name); + +/* Sets a new handler function for getting unknown classes (to be used + by objc_getClass () and related), and returns the previous one. + This function is not safe to call in a multi-threaded environment + because other threads may be trying to use the get unknown class + handler while you change it! */ +objc_EXPORT +objc_get_unknown_class_handler +objc_setGetUnknownClassHandler (objc_get_unknown_class_handler new_handler); + +/* Return the class with name 'name', if it is already registered with + the runtime. If it is not registered, and + objc_setGetUnknownClassHandler() has been called to set a handler + for unknown classes, the handler is called to give it a chance to + load the class in some other way. If the class is not known to the + runtime and the handler is not set or returns Nil, objc_getClass() + returns Nil. */ +objc_EXPORT Class objc_getClass (const char *name); + +/* Return the class with name 'name', if it is already registered with + the runtime. Return Nil if not. This function does not call the + objc_get_unknown_class_handler function if the class is not + found. */ +objc_EXPORT Class objc_lookUpClass (const char *name); + +/* Return the meta class associated to the class with name 'name', if + it is already registered with the runtime. First, it finds the + class using objc_getClass(). Then, it returns the associated meta + class. 
If the class could not be found using objc_getClass(), + returns Nil. */ +objc_EXPORT Class objc_getMetaClass (const char *name); + +/* This is identical to objc_getClass(), but if the class is not found, + it aborts the process instead of returning Nil. */ +objc_EXPORT Class objc_getRequiredClass (const char *name); + +/* If 'returnValue' is NULL, 'objc_getClassList' returns the number of + classes currently registered with the runtime. If 'returnValue' is + not NULL, it should be a (Class *) pointer to an area of memory + which can contain up to 'maxNumberOfClassesToReturn' Class records. + 'objc_getClassList' will fill the area pointed to by 'returnValue' + with all the Classes registered with the runtime (or up to + maxNumberOfClassesToReturn if there are more than + maxNumberOfClassesToReturn). The function return value is the + number of classes actually returned in 'returnValue'. */ +objc_EXPORT int objc_getClassList (Class *returnValue, int maxNumberOfClassesToReturn); + +/* Compatibility Note: The Apple/NeXT runtime also has + + Class objc_getFutureClass (const char *name); + void objc_setFutureClass (Class class_, const char *name); + + the documentation is unclear on what they are supposed to do, and + the GNU Objective-C Runtime currently does not provide them. */ + +/* Return the name of the class 'class_', or the string "nil" if the + class_ is Nil. */ +objc_EXPORT const char * class_getName (Class class_); + +/* Return YES if 'class_' is a meta class, and NO if not. If 'class_' + is Nil, return NO. */ +objc_EXPORT BOOL class_isMetaClass (Class class_); + +/* Return the superclass of 'class_'. If 'class_' is Nil, or it is a + root class, return Nil. This function also works if 'class_' is a + class being constructed, that is, a class returned by + objc_allocateClassPair() but before it has been registered with the + runtime using objc_registerClassPair(). */ +objc_EXPORT Class class_getSuperclass (Class class_); + +/* Return the 'version' number of the class, which is an integer that + can be used to track changes in the class API, methods and + variables. If class_ is Nil, return 0. If class_ is not Nil, the + version is 0 unless class_setVersion() has been called to set a + different one. + + Please note that internally the version is a long, but the API only + allows you to set and retrieve int values. */ +objc_EXPORT int class_getVersion (Class class_); + +/* Set the 'version' number of the class, which is an integer that can + be used to track changes in the class API, methods and variables. + If 'class_' is Nil, does nothing. + + This is typically used internally by "Foundation" libraries such as + GNUstep Base to support serialization / deserialization of objects + that work across changes in the classes. If you are using such a + library, you probably want to use their versioning API, which may + be based on this one, but is integrated with the rest of the + library. + + Please note that internally the version is a long, but the API only + allows you to set and retrieve int values. */ +objc_EXPORT void class_setVersion (Class class_, int version); + +/* Return the size in bytes (a byte is the size of a char) of an + instance of the class. If class_ is Nil, return 0; else it return + a non-zero number (since the 'isa' instance variable is required + for all classes). */ +objc_EXPORT size_t class_getInstanceSize (Class class_); + +/* Change the implementation of the method. 
It also searches all + classes for any class implementing the method, and replaces the + existing implementation with the new one. For that to work, + 'method' must be a method returned by class_getInstanceMethod() or + class_getClassMethod() as the matching is done by comparing the + pointers; in that case, only the implementation in the class is + modified. Return the previous implementation that has been + replaced. If method or implementation is NULL, do nothing and + return NULL. */ +objc_EXPORT IMP +method_setImplementation (Method method, IMP implementation); + +/* Swap the implementation of two methods in a single, atomic + operation. This is equivalent to getting the implementation of + each method and then calling method_setImplementation() on the + other one. For this to work, the two methods must have been + returned by class_getInstanceMethod() or class_getClassMethod(). + If 'method_a' or 'method_b' is NULL, do nothing. */ +objc_EXPORT void +method_exchangeImplementations (Method method_a, Method method_b); + +/* Create a new class/meta-class pair. This function is called to + create a new class at runtime. The class is created with + superclass 'superclass' (use 'Nil' to create a new root class) and + name 'class_name'. 'extraBytes' can be used to specify some extra + space for indexed variables to be added at the end of the class and + meta-class objects (it is recommended that you set extraBytes to + 0). Once you have created the class, it is not usable yet. You + need to add any instance variables (by using class_addIvar()), any + instance methods (by using class_addMethod()) and any class methods + (by using class_addMethod() on the meta-class, as in + class_addMethod (object_getClass (class), method)) that are + required, and then you need to call objc_registerClassPair() to + activate the class. If you need to create a hierarchy of classes, + you need to create and register them one at a time. You can not + create a new class using another class in construction as + superclass. Return Nil if 'class-name' is NULL or if a class with + that name already exists or 'superclass' is a class still in + construction. + + Implementation Note: in the GNU runtime, allocating a class pair + only creates the structures for the class pair, but does not + register anything with the runtime. The class is registered with + the runtime only when objc_registerClassPair() is called. In + particular, if a class is in construction, objc_getClass() will not + find it, the superclass will not know about it, + class_getSuperclass() will return Nil and another thread may + allocate a class pair with the same name; the conflict will only be + detected when the classes are registered with the runtime. + */ +objc_EXPORT Class +objc_allocateClassPair (Class super_class, const char *class_name, + size_t extraBytes); + +/* Register a class pair that was created with + objc_allocateClassPair(). After you register a class, you can no + longer make changes to its instance variables, but you can start + creating instances of it. Do nothing if 'class_' is NULL or if it + is not a class allocated by objc_allocateClassPair() and still in + construction. */ +objc_EXPORT void +objc_registerClassPair (Class class_); + +/* Dispose of a class pair created using objc_allocateClassPair(). + Call this function if you started creating a new class with + objc_allocateClassPair() but then want to abort the process. You + should not access 'class_' after calling this method. 
Note that if + 'class_' has already been registered with the runtime via + objc_registerClassPair(), this function does nothing; you can only + dispose of class pairs that are still being constructed. Do + nothing if class is 'Nil' or if 'class_' is not a class being + constructed. */ +objc_EXPORT void +objc_disposeClassPair (Class class_); + +/* Compatibility Note: The Apple/NeXT runtime has the function + objc_duplicateClass () but it's undocumented. The GNU runtime does + not have it. */ + + +/** Implementation: the following functions are in sendmsg.c. */ + +/* Return the instance method with selector 'selector' of class + 'class_', or NULL if the class (or one of its superclasses) does + not implement the method. Return NULL if class_ is Nil or selector + is NULL. Calling this function may trigger a call to + +resolveInstanceMethod:, but does not return a forwarding + function. */ +objc_EXPORT Method class_getInstanceMethod (Class class_, SEL selector); + +/* Return the class method with selector 'selector' of class 'class_', + or NULL if the class (or one of its superclasses) does not + implement the method. Return NULL if class_ is Nil or selector is + NULL. Calling this function may trigger a call to + +resolveClassMethod:, but does not return a forwarding + function. */ +objc_EXPORT Method class_getClassMethod (Class class_, SEL selector); + +/* Return the IMP (pointer to the function implementing a method) for + the instance method with selector 'selector' in class 'class_'. + This is the same routine that is used while messaging, and should + be very fast. Note that you most likely would need to cast the + return function pointer to a function pointer with the appropriate + arguments and return type before calling it. To get a class + method, you can pass the meta-class as the class_ argument (ie, use + class_getMethodImplementation (object_getClass (class_), + selector)). Return NULL if class_ is Nil or selector is NULL. + This function first looks for an existing method; if it is not + found, it calls +resolveClassMethod: or +resolveInstanceMethod: + (depending on whether a class or instance method is being looked + up) if it is implemented. If the method returns YES, then it tries + the look up again (the assumption being that +resolveClassMethod: + or resolveInstanceMethod: will add the method using + class_addMethod()). If it is still not found, it returns a + forwarding function. */ +objc_EXPORT IMP class_getMethodImplementation (Class class_, SEL selector); + +/* Compatibility Note: the Apple/NeXT runtime has the function + class_getMethodImplementation_stret () which currently does not + exist on the GNU runtime because the messaging implementation is + different. */ + +/* Return YES if class 'class_' has an instance method implementing + selector 'selector', and NO if not. Return NO if class_ is Nil or + selector is NULL. If you need to check a class method, use the + meta-class as the class_ argument (ie, use class_respondsToSelector + (object_getClass (class_), selector)). */ +objc_EXPORT BOOL class_respondsToSelector (Class class_, SEL selector); + +/* Add a method to a class. Use this function to add a new method to + a class (potentially overriding a method with the same selector in + the superclass); if you want to modify an existing method, use + method_setImplementation() instead (or class_replaceMethod ()). 
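/* [Editor's note: a small sketch, not part of the upstream header.
   Class methods are looked up through the meta-class, as the comments
   above describe.] */

static BOOL
responds (Class cls, const char *selector_name, BOOL class_method)
{
  SEL sel = sel_registerName (selector_name);

  if (class_method)
    return class_respondsToSelector (object_getClass (cls), sel);
  return class_respondsToSelector (cls, sel);
}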
+ This method adds an instance method to 'class_'; to add a class + method, get the meta class first, then add the method to the meta + class, that is, use + + class_addMethod (object_getClass (class_), selector, + implementation, type); + + Return YES if the method was added, and NO if not. Do nothing if + one of the arguments is NULL. */ +objc_EXPORT BOOL class_addMethod (Class class_, SEL selector, IMP implementation, + const char *method_types); + +/* Replace a method in a class. If the class already have a method + with this 'selector', find it and use method_setImplementation() to + replace the implementation with 'implementation' (method_types is + ignored in that case). If the class does not already have a method + with this 'selector', call 'class_addMethod() to add it. + + Return the previous implementation of the method, or NULL if none + was found. Return NULL if any of the arguments is NULL. */ +objc_EXPORT IMP class_replaceMethod (Class class_, SEL selector, IMP implementation, + const char *method_types); + + +/** Implementation: the following functions are in methods.c. */ + +/* Return the selector for method 'method'. Return NULL if 'method' + is NULL. + + This function is misnamed; it should be called + 'method_getSelector'. To get the actual name, get the selector, + then the name from the selector (ie, use sel_getName + (method_getName (method))). */ +objc_EXPORT SEL method_getName (Method method); + +/* Return the IMP of the method. Return NULL if 'method' is NULL. */ +objc_EXPORT IMP method_getImplementation (Method method); + +/* Return the type encoding of the method. Return NULL if 'method' is + NULL. */ +objc_EXPORT const char * method_getTypeEncoding (Method method); + +/* Return a method description for the method. Return NULL if + 'method' is NULL. */ +objc_EXPORT struct objc_method_description * method_getDescription (Method method); + +/* Return all the instance methods of the class. The return value of + the function is a pointer to an area, allocated with malloc(), that + contains all the instance methods of the class. It does not + include instance methods of superclasses. The list is terminated + by NULL. Optionally, if you pass a non-NULL + 'numberOfReturnedMethods' pointer, the unsigned int that it points + to will be filled with the number of instance methods returned. To + get the list of class methods, pass the meta-class in the 'class_' + argument, (ie, use class_copyMethodList (object_getClass (class_), + &numberOfReturnedMethods)). */ +objc_EXPORT Method * class_copyMethodList (Class class_, unsigned int *numberOfReturnedMethods); + + +/** Implementation: the following functions are in encoding.c. */ + +/* Return the number of arguments that the method 'method' expects. + Note that all methods need two implicit arguments ('self' for the + receiver, and '_cmd' for the selector). Return 0 if 'method' is + NULL. */ +objc_EXPORT unsigned int method_getNumberOfArguments (Method method); + +/* Return the string encoding for the return type of method 'method'. + The string is a standard zero-terminated string in an area of + memory allocated with malloc(); you should free it with free() when + you finish using it. Return an empty string if method is NULL. */ +objc_EXPORT char * method_copyReturnType (Method method); + +/* Return the string encoding for the argument type of method + 'method', argument number 'argumentNumber' ('argumentNumber' is 0 + for self, 1 for _cmd, and 2 or more for the additional arguments if + any). 
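/* [Editor's note: a sketch, not part of the upstream header, walking
   the malloc()ed method list of a class.] */

#include <stdio.h>
#include <stdlib.h>

static void
dump_methods (Class cls)
{
  unsigned int i, n = 0;
  Method *methods = class_copyMethodList (cls, &n);

  if (methods != NULL)
    {
      for (i = 0; i < n; i++)
        printf ("%s  types: %s\n",
                sel_getName (method_getName (methods[i])),
                method_getTypeEncoding (methods[i]));
      free (methods);
    }
}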
The string is a standard zero-terminated string in an area + of memory allocated with malloc(); you should free it with free() + when you finish using it. Return an empty string if method is NULL + or if 'argumentNumber' refers to a non-existing argument. */ +objc_EXPORT char * method_copyArgumentType (Method method, unsigned int argumentNumber); + +/* Return the string encoding for the return type of method 'method'. + The string is returned by copying it into the supplied + 'returnValue' string, which is of size 'returnValueSize'. No more + than 'returnValueSize' characters are copied; if the encoding is + smaller than 'returnValueSize', the rest of 'returnValue' is filled + with zeros. If it is bigger, it is truncated (and would not be + zero-terminated). You should supply a big enough + 'returnValueSize'. If the method is NULL, returnValue is set to a + string of zeros. */ +objc_EXPORT void method_getReturnType (Method method, char *returnValue, + size_t returnValueSize); + +/* Return the string encoding for the argument type of method + 'method', argument number 'argumentNumber' ('argumentNumber' is 0 + for self, 1 for _cmd, and 2 or more for the additional arguments if + any). The string is returned by copying it into the supplied + 'returnValue' string, which is of size 'returnValueSize'. No more + than 'returnValueSize' characters are copied; if the encoding is + smaller than 'returnValueSize', the rest of 'returnValue' is filled + with zeros. If it is bigger, it is truncated (and would not be + zero-terminated). You should supply a big enough + 'returnValueSize'. If the method is NULL, returnValue is set to a + string of zeros. */ +objc_EXPORT void method_getArgumentType (Method method, unsigned int argumentNumber, + char *returnValue, size_t returnValueSize); + + +/** Implementation: the following functions are in protocols.c. */ + +/* Return the protocol with name 'name', or nil if it the protocol is + not known to the runtime. */ +objc_EXPORT Protocol *objc_getProtocol (const char *name); + +/* Return all the protocols known to the runtime. The return value of + the function is a pointer to an area, allocated with malloc(), that + contains all the protocols known to the runtime; the list is + terminated by NULL. You should free this area using free() once + you no longer need it. Optionally, if you pass a non-NULL + 'numberOfReturnedProtocols' pointer, the unsigned int that it + points to will be filled with the number of protocols returned. If + there are no protocols known to the runtime, NULL is returned. */ +objc_EXPORT Protocol **objc_copyProtocolList (unsigned int *numberOfReturnedProtocols); + +/* Add a protocol to a class, and return YES if it was done + successfully, and NO if not. At the moment, NO should only happen + if class_ or protocol are nil, if the protocol is not a Protocol + object or if the class already conforms to the protocol. */ +objc_EXPORT BOOL class_addProtocol (Class class_, Protocol *protocol); + +/* Return YES if the class 'class_' conforms to Protocol 'protocol', + and NO if not. This function does not check superclasses; if you + want to check for superclasses (in the way that [NSObject + +conformsToProtocol:] does) you need to iterate over the class + hierarchy using class_getSuperclass(), and call + class_conformsToProtocol() for each of them. */ +objc_EXPORT BOOL class_conformsToProtocol (Class class_, Protocol *protocol); + +/* Return all the protocols that the class conforms to. 
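/* [Editor's note: a sketch, not part of the upstream header, of the
   superclass walk described above, which mirrors what [NSObject
   +conformsToProtocol:] does.] */

static BOOL
conforms_including_superclasses (Class cls, Protocol *protocol)
{
  Class c;

  for (c = cls; c != Nil; c = class_getSuperclass (c))
    if (class_conformsToProtocol (c, protocol))
      return YES;
  return NO;
}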
The return + value of the function is a pointer to an area, allocated with + malloc(), that contains all the protocols formally adopted by the + class. It does not include protocols adopted by superclasses. The + list is terminated by NULL. Optionally, if you pass a non-NULL + 'numberOfReturnedProtocols' pointer, the unsigned int that it + points to will be filled with the number of protocols returned. + This function does not return protocols that superclasses conform + to. */ +objc_EXPORT Protocol **class_copyProtocolList (Class class_, unsigned int *numberOfReturnedProtocols); + +/* Return YES if protocol 'protocol' conforms to protocol + 'anotherProtocol', and NO if not. Note that if one of the two + protocols is nil, it returns NO. */ +objc_EXPORT BOOL protocol_conformsToProtocol (Protocol *protocol, Protocol *anotherProtocol); + +/* Return YES if protocol 'protocol' is the same as protocol + 'anotherProtocol', and 'NO' if not. Note that it returns YES if + the two protocols are both nil. */ +objc_EXPORT BOOL protocol_isEqual (Protocol *protocol, Protocol *anotherProtocol); + +/* Return the name of protocol 'protocol'. If 'protocol' is nil or is + not a Protocol, return NULL. */ +objc_EXPORT const char *protocol_getName (Protocol *protocol); + +/* Return the method description for the method with selector + 'selector' in protocol 'protocol'; if 'requiredMethod' is YES, the + function searches the list of required methods; if NO, the list of + optional methods. If 'instanceMethod' is YES, the function search + for an instance method; if NO, for a class method. If there is no + matching method, an objc_method_description structure with both + name and types set to NULL is returned. This function will only + find methods that are directly declared in the protocol itself, not + in other protocols that this protocol adopts. + + Note that the traditional ABI does not store the list of optional + methods of a protocol in a compiled module, so the traditional ABI + will always return (NULL, NULL) when requiredMethod == NO. */ +objc_EXPORT struct objc_method_description protocol_getMethodDescription (Protocol *protocol, + SEL selector, + BOOL requiredMethod, + BOOL instanceMethod); + +/* Return the method descriptions of all the methods of the protocol. + The return value of the function is a pointer to an area, allocated + with malloc(), that contains all the method descriptions of the + methods of the protocol. It does not recursively include methods + of the protocols adopted by this protocol. The list is terminated + by a NULL objc_method_description (one with both fields set to + NULL). Optionally, if you pass a non-NULL + 'numberOfReturnedMethods' pointer, the unsigned int that it points + to will be filled with the number of properties returned. + + Note that the traditional ABI does not store the list of optional + methods of a protocol in a compiled module, so the traditional ABI + will always return an empty list if requiredMethod is set to + NO. */ +objc_EXPORT struct objc_method_description *protocol_copyMethodDescriptionList (Protocol *protocol, + BOOL requiredMethod, + BOOL instanceMethod, + unsigned int *numberOfReturnedMethods); + +/* Return the property with name 'propertyName' of the protocol + 'protocol'. If 'requiredProperty' is YES, the function searches + the list of required properties; if NO, the list of optional + properties. If 'instanceProperty' is YES, the function searches + the list of instance properties; if NO, the list of class + properties. 
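/* [Editor's note: a sketch, not part of the upstream header; the
   selector name passed in is the caller's.  A missing method is
   signalled by a description whose fields are both NULL, as described
   above.] */

static BOOL
protocol_requires_instance_method (Protocol *protocol, const char *name)
{
  struct objc_method_description d
    = protocol_getMethodDescription (protocol, sel_registerName (name),
                                     YES /* required */,
                                     YES /* instance */);
  return d.name != NULL;
}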
At the moment, optional properties and class + properties are not part of the Objective-C language, so both + 'requiredProperty' and 'instanceProperty' should be set to YES. + This function returns NULL if the required property can not be + found. + + Note that the traditional ABI does not store the list of properties + of a protocol in a compiled module, so the traditional ABI will + always return NULL. */ +objc_EXPORT Property protocol_getProperty (Protocol *protocol, const char *propertyName, + BOOL requiredProperty, BOOL instanceProperty); + +/* Return all the properties of the protocol. The return value of the + function is a pointer to an area, allocated with malloc(), that + contains all the properties of the protocol. It does not + recursively include properties of the protocols adopted by this + protocol. The list is terminated by NULL. Optionally, if you pass + a non-NULL 'numberOfReturnedProperties' pointer, the unsigned int + that it points to will be filled with the number of properties + returned. + + Note that the traditional ABI does not store the list of properties + of a protocol in a compiled module, so the traditional ABI will + always return NULL and store 0 in numberOfReturnedProperties. */ +objc_EXPORT Property *protocol_copyPropertyList (Protocol *protocol, unsigned int *numberOfReturnedProperties); + +/* Return all the protocols that the protocol conforms to. The return + value of the function is a pointer to an area, allocated with + malloc(), that contains all the protocols formally adopted by the + protocol. It does not recursively include protocols adopted by the + protocols adopted by this protocol. The list is terminated by + NULL. Optionally, if you pass a non-NULL + 'numberOfReturnedProtocols' pointer, the unsigned int that it + points to will be filled with the number of protocols returned. */ +objc_EXPORT Protocol **protocol_copyProtocolList (Protocol *protocol, unsigned int *numberOfReturnedProtocols); + + +/** Implementation: the following hook is in init.c. */ + +/* This is a hook which is called by __objc_exec_class every time a + class or a category is loaded into the runtime. This may e.g. help + a dynamic loader determine the classes that have been loaded when + an object file is dynamically linked in. */ +objc_EXPORT void (*_objc_load_callback)(Class _class, struct objc_category *category); + + +/** Implementation: the following functions are in objc-foreach.c. */ + +/* 'objc_enumerationMutation()' is called when a collection is + mutated while being "fast enumerated". That is a hard error, and + objc_enumerationMutation is called to deal with it. 'collection' + is the collection object that was mutated during an enumeration. + + objc_enumerationMutation() will invoke the mutation handler if any + is set. Then, it will abort the program. + + Compatibility note: the Apple runtime will not abort the program + after calling the mutation handler. */ +objc_EXPORT void objc_enumerationMutation (id collection); + +/* 'objc_set_enumeration_mutation_handler' can be used to set a + function that will be called (instead of aborting) when a fast + enumeration is mutated during enumeration. The handler will be + called with the 'collection' being mutated as the only argument and + it should not return; it should either exit the program, or could + throw an exception. The recommended implementation is to throw an + exception - the user can then use exception handlers to deal with + it. 
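/* [Editor's note: a sketch, not part of the upstream header, of
   installing the load callback declared above.  Whether 'category' is
   NULL for plain class loads is not specified here, so this sketch
   only logs the class name.] */

#include <stdio.h>

static void
my_load_callback (Class _class, struct objc_category *category)
{
  /* Called by __objc_exec_class for every class or category loaded. */
  printf ("loaded: %s\n", class_getName (_class));
  (void) category;
}

static void
install_load_callback (void)
{
  _objc_load_callback = my_load_callback;
}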
+ + This function is not thread safe (other threads may be trying to + invoke the enumeration mutation handler while you are changing it!) + and should be called during during the program initialization + before threads are started. It is mostly reserved for "Foundation" + libraries; in the case of GNUstep, GNUstep Base may be using this + function to improve the standard enumeration mutation handling. + You probably shouldn't use this function unless you are writing + your own Foundation library. */ +objc_EXPORT void objc_setEnumerationMutationHandler (void (*handler)(id)); + +/* This structure (used during fast enumeration) is automatically + defined by the compiler (it is as if this definition was always + included in all Objective-C files). Note that it is usually + defined again with the name of NSFastEnumeration by "Foundation" + libraries such as GNUstep Base. And if NSFastEnumeration is + defined, the compiler will use it instead of + __objcFastEnumerationState when doing fast enumeration. */ +/* +struct __objcFastEnumerationState +{ + unsigned long state; + id *itemsPtr; + unsigned long *mutationsPtr; + unsigned long extra[5]; +}; +*/ + + +/* Compatibility Note: The Apple/NeXT runtime has the functions + objc_copyImageNames (), class_getImageName () and + objc_copyClassNamesForImage () but they are undocumented. The GNU + runtime does not have them at the moment. */ + +/* Compatibility Note: The Apple/NeXT runtime has the functions + objc_setAssociatedObject (), objc_getAssociatedObject (), + objc_removeAssociatedObjects () and the objc_AssociationPolicy type + and related enum. The GNU runtime does not have them yet. + TODO: Implement them. */ + +/* Compatibility Note: The Apple/NeXT runtime has the function + objc_setForwardHandler (). The GNU runtime does not have it + because messaging (and, in particular, forwarding) works in a + different (incompatible) way with the GNU runtime. If you need to + customize message forwarding at the Objective-C runtime level (that + is, if you are implementing your own "Foundation" library such as + GNUstep Base on top of the Objective-C runtime), in objc/message.h + there are hooks (that work in the framework of the GNU runtime) to + do so. */ + + +/** Implementation: the following functions are in memory.c. */ + +/* Traditional GNU Objective-C Runtime functions that are used for + memory allocation and disposal. These functions are used in the + same way as you use malloc, realloc, calloc and free and make sure + that memory allocation works properly with the garbage + collector. + + Compatibility Note: these functions are not available with the + Apple/NeXT runtime. */ + +objc_EXPORT void *objc_malloc(size_t size); + +/* FIXME: Shouldn't the following be called objc_malloc_atomic ? The + GC function is GC_malloc_atomic() which makes sense. + */ +objc_EXPORT void *objc_atomic_malloc(size_t size); + +objc_EXPORT void *objc_realloc(void *mem, size_t size); + +objc_EXPORT void *objc_calloc(size_t nelem, size_t size); + +objc_EXPORT void objc_free(void *mem); + + +/** Implementation: the following functions are in gc.c. */ + +/* The GNU Objective-C Runtime has a different implementation of + garbage collection. + + Compatibility Note: these functions are not available with the + Apple/NeXT runtime. */ + +/* Mark the instance variable as inaccessible to the garbage + collector. 
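/* [Editor's note: a sketch, not part of the upstream header.  The
   handler must not return, so this one aborts after logging; a
   "Foundation" library would throw an exception instead.] */

#include <stdio.h>
#include <stdlib.h>

static void
my_mutation_handler (id collection)
{
  fprintf (stderr, "collection of class %s mutated during enumeration\n",
           object_getClassName (collection));
  abort ();
}

static void
install_mutation_handler (void)
{
  /* Do this during program initialization, before threads start.  */
  objc_setEnumerationMutationHandler (my_mutation_handler);
}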
*/ +objc_EXPORT void class_ivar_set_gcinvisible (Class _class, + const char* ivarname, + BOOL gcInvisible); + + +/** Implementation: the following functions are in encoding.c. */ + +/* Traditional GNU Objective-C Runtime functions that are currently + used to implement method forwarding. + + Compatibility Note: these functions are not available with the + Apple/NeXT runtime. */ + +/* Return the size of a variable which has the specified 'type' + encoding. */ +objc_EXPORT int objc_sizeof_type (const char *type); + +/* Return the align of a variable which has the specified 'type' + encoding. */ +objc_EXPORT int objc_alignof_type (const char *type); + +/* Return the aligned size of a variable which has the specified + 'type' encoding. The aligned size is the size rounded up to the + nearest alignment. */ +objc_EXPORT int objc_aligned_size (const char *type); + +/* Return the promoted size of a variable which has the specified + 'type' encoding. This is the size rounded up to the nearest + integral of the wordsize, taken to be the size of a void *. */ +objc_EXPORT int objc_promoted_size (const char *type); + + +/* The following functions are used when parsing the type encoding of + methods, to skip over parts that are ignored. They take as + argument a pointer to a location inside the type encoding of a + method (which is a string) and return a new pointer, pointing to a + new location inside the string after having skipped the unwanted + information. */ + +/* Skip some type qualifiers (_C_CONST, _C_IN, etc). These may + eventually precede typespecs occurring in method prototype + encodings. */ +objc_EXPORT const char *objc_skip_type_qualifiers (const char *type); + +/* Skip one typespec element (_C_CLASS, _C_SEL, etc). If the typespec + is prepended by type qualifiers, these are skipped as well. */ +objc_EXPORT const char *objc_skip_typespec (const char *type); + +/* Skip an offset. */ +objc_EXPORT const char *objc_skip_offset (const char *type); + +/* Skip an argument specification (ie, skipping a typespec, which may + include qualifiers, and an offset too). */ +objc_EXPORT const char *objc_skip_argspec (const char *type); + +/* Read type qualifiers (_C_CONST, _C_IN, etc) from string 'type' + (stopping at the first non-type qualifier found) and return an + unsigned int which is the logical OR of all the corresponding flags + (_F_CONST, _F_IN etc). */ +objc_EXPORT unsigned objc_get_type_qualifiers (const char *type); + + +/* Note that the following functions work for very simple structures, + but get easily confused by more complicated ones (for example, + containing vectors). A better solution is required. These + functions are likely to change in the next GCC release. */ + +/* The following three functions can be used to determine how a + structure is laid out by the compiler. For example: + + struct objc_struct_layout layout; + int i; + + objc_layout_structure (type, &layout); + while (objc_layout_structure_next_member (&layout)) + { + int position, align; + const char *type; + + objc_layout_structure_get_info (&layout, &position, &align, &type); + printf ("element %d has offset %d, alignment %d\n", + i++, position, align); + } + + These functions are used by objc_sizeof_type and objc_alignof_type + functions to compute the size and alignment of structures. The + previous method of computing the size and alignment of a structure + was not working on some architectures, particularly on AIX, and in + the presence of bitfields inside the structure. 
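/* [Editor's note: illustrative calls, not part of the upstream
   header, assuming the file is compiled as Objective-C so that
   @encode() is available.] */

static void
encoding_demo (void)
{
  /* Both match the compiler's own sizeof/__alignof__ for the type. */
  int size  = objc_sizeof_type (@encode (unsigned long));
  int align = objc_alignof_type (@encode (double));

  /* The aligned size is the size rounded up to the alignment.  */
  int asize = objc_aligned_size (@encode (struct objc_method_description));

  (void) size; (void) align; (void) asize;
}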
*/ +struct objc_struct_layout +{ + const char *original_type; + const char *type; + const char *prev_type; + unsigned int record_size; + unsigned int record_align; +}; + +objc_EXPORT void objc_layout_structure (const char *type, + struct objc_struct_layout *layout); +objc_EXPORT BOOL objc_layout_structure_next_member (struct objc_struct_layout *layout); +objc_EXPORT void objc_layout_finish_structure (struct objc_struct_layout *layout, + unsigned int *size, + unsigned int *align); +objc_EXPORT void objc_layout_structure_get_info (struct objc_struct_layout *layout, + unsigned int *offset, + unsigned int *align, + const char **type); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/thr.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/thr.h new file mode 100644 index 0000000..e4bd8f0 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/objc/thr.h @@ -0,0 +1,116 @@ +/* Thread and mutex controls for Objective C. + Copyright (C) 1996-2017 Free Software Foundation, Inc. + Contributed by Galen C. Hunt (gchunt@cs.rochester.edu) + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +#ifndef __thread_INCLUDE_GNU +#define __thread_INCLUDE_GNU + +#include "objc.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/************************************************************************* + * Universal static variables: + */ +extern int __objc_thread_exit_status; /* Global exit status. */ + +/******** + * Thread safe implementation types and functions. + */ + +/* Thread priorities */ +#define OBJC_THREAD_INTERACTIVE_PRIORITY 2 +#define OBJC_THREAD_BACKGROUND_PRIORITY 1 +#define OBJC_THREAD_LOW_PRIORITY 0 + +/* A thread */ +typedef void * objc_thread_t; + +/* This structure represents a single mutual exclusion lock. */ +struct objc_mutex +{ + volatile objc_thread_t owner; /* Id of thread that owns. */ + volatile int depth; /* # of acquires. 
*/ + void * backend; /* Specific to backend */ +}; +typedef struct objc_mutex *objc_mutex_t; + +/* This structure represents a single condition mutex */ +struct objc_condition +{ + void * backend; /* Specific to backend */ +}; +typedef struct objc_condition *objc_condition_t; + +/* Frontend mutex functions */ +objc_mutex_t objc_mutex_allocate (void); +int objc_mutex_deallocate (objc_mutex_t mutex); +int objc_mutex_lock (objc_mutex_t mutex); +int objc_mutex_unlock (objc_mutex_t mutex); +int objc_mutex_trylock (objc_mutex_t mutex); + +/* Frontend condition mutex functions */ +objc_condition_t objc_condition_allocate (void); +int objc_condition_deallocate (objc_condition_t condition); +int objc_condition_wait (objc_condition_t condition, objc_mutex_t mutex); +int objc_condition_signal (objc_condition_t condition); +int objc_condition_broadcast (objc_condition_t condition); + +/* Frontend thread functions */ +objc_thread_t objc_thread_detach (SEL selector, id object, id argument); +void objc_thread_yield (void); +int objc_thread_exit (void); +int objc_thread_set_priority (int priority); +int objc_thread_get_priority (void); +void * objc_thread_get_data (void); +int objc_thread_set_data (void *value); +objc_thread_t objc_thread_id (void); +void objc_thread_add (void); +void objc_thread_remove (void); + +/* + Use this to set the hook function that will be called when the + runtime initially becomes multi threaded. + The hook function is only called once, meaning only when the + 2nd thread is spawned, not for each and every thread. + + It returns the previous hook function or NULL if there is none. + + A program outside of the runtime could set this to some function so + it can be informed; for example, the GNUstep Base Library sets it + so it can implement the NSBecomingMultiThreaded notification. + */ +typedef void (*objc_thread_callback) (void); +objc_thread_callback objc_set_thread_callback (objc_thread_callback func); + +/* Backend initialization functions */ +int __objc_init_thread_system (void); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* not __thread_INCLUDE_GNU */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/omp.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/omp.h new file mode 100644 index 0000000..ce524ea --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/omp.h @@ -0,0 +1,165 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + Contributed by Richard Henderson . + + This file is part of the GNU Offloading and Multi Processing Library + (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . 
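/* [Editor's note: a usage sketch, not part of the upstream header.
   The 'depth' field above suggests these mutexes count recursive
   acquires by the owning thread, so the balanced double lock/unlock
   below is shown under that assumption.] */

static void
mutex_demo (void)
{
  objc_mutex_t m = objc_mutex_allocate ();

  objc_mutex_lock (m);
  objc_mutex_lock (m);       /* re-acquired by the same thread */
  objc_mutex_unlock (m);
  objc_mutex_unlock (m);     /* now fully released */

  objc_mutex_deallocate (m);
}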
*/ + +#ifndef _OMP_H +#define _OMP_H 1 + +#ifndef _LIBGOMP_OMP_LOCK_DEFINED +#define _LIBGOMP_OMP_LOCK_DEFINED 1 +/* These two structures get edited by the libgomp build process to + reflect the shape of the two types. Their internals are private + to the library. */ + +typedef struct +{ + unsigned char _x[4] + __attribute__((__aligned__(4))); +} omp_lock_t; + +typedef struct +{ + unsigned char _x[12] + __attribute__((__aligned__(4))); +} omp_nest_lock_t; +#endif + +typedef enum omp_sched_t +{ + omp_sched_static = 1, + omp_sched_dynamic = 2, + omp_sched_guided = 3, + omp_sched_auto = 4 +} omp_sched_t; + +typedef enum omp_proc_bind_t +{ + omp_proc_bind_false = 0, + omp_proc_bind_true = 1, + omp_proc_bind_master = 2, + omp_proc_bind_close = 3, + omp_proc_bind_spread = 4 +} omp_proc_bind_t; + +typedef enum omp_lock_hint_t +{ + omp_lock_hint_none = 0, + omp_lock_hint_uncontended = 1, + omp_lock_hint_contended = 2, + omp_lock_hint_nonspeculative = 4, + omp_lock_hint_speculative = 8, +} omp_lock_hint_t; + +#ifdef __cplusplus +extern "C" { +# define __GOMP_NOTHROW throw () +#else +# define __GOMP_NOTHROW __attribute__((__nothrow__)) +#endif + +extern void omp_set_num_threads (int) __GOMP_NOTHROW; +extern int omp_get_num_threads (void) __GOMP_NOTHROW; +extern int omp_get_max_threads (void) __GOMP_NOTHROW; +extern int omp_get_thread_num (void) __GOMP_NOTHROW; +extern int omp_get_num_procs (void) __GOMP_NOTHROW; + +extern int omp_in_parallel (void) __GOMP_NOTHROW; + +extern void omp_set_dynamic (int) __GOMP_NOTHROW; +extern int omp_get_dynamic (void) __GOMP_NOTHROW; + +extern void omp_set_nested (int) __GOMP_NOTHROW; +extern int omp_get_nested (void) __GOMP_NOTHROW; + +extern void omp_init_lock (omp_lock_t *) __GOMP_NOTHROW; +extern void omp_init_lock_with_hint (omp_lock_t *, omp_lock_hint_t) + __GOMP_NOTHROW; +extern void omp_destroy_lock (omp_lock_t *) __GOMP_NOTHROW; +extern void omp_set_lock (omp_lock_t *) __GOMP_NOTHROW; +extern void omp_unset_lock (omp_lock_t *) __GOMP_NOTHROW; +extern int omp_test_lock (omp_lock_t *) __GOMP_NOTHROW; + +extern void omp_init_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void omp_init_nest_lock_with_hint (omp_lock_t *, omp_lock_hint_t) + __GOMP_NOTHROW; +extern void omp_destroy_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void omp_set_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern void omp_unset_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; +extern int omp_test_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; + +extern double omp_get_wtime (void) __GOMP_NOTHROW; +extern double omp_get_wtick (void) __GOMP_NOTHROW; + +extern void omp_set_schedule (omp_sched_t, int) __GOMP_NOTHROW; +extern void omp_get_schedule (omp_sched_t *, int *) __GOMP_NOTHROW; +extern int omp_get_thread_limit (void) __GOMP_NOTHROW; +extern void omp_set_max_active_levels (int) __GOMP_NOTHROW; +extern int omp_get_max_active_levels (void) __GOMP_NOTHROW; +extern int omp_get_level (void) __GOMP_NOTHROW; +extern int omp_get_ancestor_thread_num (int) __GOMP_NOTHROW; +extern int omp_get_team_size (int) __GOMP_NOTHROW; +extern int omp_get_active_level (void) __GOMP_NOTHROW; + +extern int omp_in_final (void) __GOMP_NOTHROW; + +extern int omp_get_cancellation (void) __GOMP_NOTHROW; +extern omp_proc_bind_t omp_get_proc_bind (void) __GOMP_NOTHROW; +extern int omp_get_num_places (void) __GOMP_NOTHROW; +extern int omp_get_place_num_procs (int) __GOMP_NOTHROW; +extern void omp_get_place_proc_ids (int, int *) __GOMP_NOTHROW; +extern int omp_get_place_num (void) __GOMP_NOTHROW; +extern 
int omp_get_partition_num_places (void) __GOMP_NOTHROW; +extern void omp_get_partition_place_nums (int *) __GOMP_NOTHROW; + +extern void omp_set_default_device (int) __GOMP_NOTHROW; +extern int omp_get_default_device (void) __GOMP_NOTHROW; +extern int omp_get_num_devices (void) __GOMP_NOTHROW; +extern int omp_get_num_teams (void) __GOMP_NOTHROW; +extern int omp_get_team_num (void) __GOMP_NOTHROW; + +extern int omp_is_initial_device (void) __GOMP_NOTHROW; +extern int omp_get_initial_device (void) __GOMP_NOTHROW; +extern int omp_get_max_task_priority (void) __GOMP_NOTHROW; + +extern void *omp_target_alloc (__SIZE_TYPE__, int) __GOMP_NOTHROW; +extern void omp_target_free (void *, int) __GOMP_NOTHROW; +extern int omp_target_is_present (void *, int) __GOMP_NOTHROW; +extern int omp_target_memcpy (void *, void *, __SIZE_TYPE__, __SIZE_TYPE__, + __SIZE_TYPE__, int, int) __GOMP_NOTHROW; +extern int omp_target_memcpy_rect (void *, void *, __SIZE_TYPE__, int, + const __SIZE_TYPE__ *, + const __SIZE_TYPE__ *, + const __SIZE_TYPE__ *, + const __SIZE_TYPE__ *, + const __SIZE_TYPE__ *, int, int) + __GOMP_NOTHROW; +extern int omp_target_associate_ptr (void *, void *, __SIZE_TYPE__, + __SIZE_TYPE__, int) __GOMP_NOTHROW; +extern int omp_target_disassociate_ptr (void *, int) __GOMP_NOTHROW; + +#ifdef __cplusplus +} +#endif + +#endif /* _OMP_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/openacc.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/openacc.h new file mode 100644 index 0000000..53d0c39 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/openacc.h @@ -0,0 +1,131 @@ +/* OpenACC Runtime Library User-facing Declarations + + Copyright (C) 2013-2017 Free Software Foundation, Inc. + + Contributed by Mentor Embedded. + + This file is part of the GNU Offloading and Multi Processing Library + (libgomp). + + Libgomp is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + . */ + +#ifndef _OPENACC_H +#define _OPENACC_H 1 + +/* The OpenACC standard is silent on whether or not including + might or must not include other header files. We chose to include + some. */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if __cplusplus >= 201103 +# define __GOACC_NOTHROW noexcept +#elif __cplusplus +# define __GOACC_NOTHROW throw () +#else /* Not C++ */ +# define __GOACC_NOTHROW __attribute__ ((__nothrow__)) +#endif + +/* Types */ +typedef enum acc_device_t { + /* Keep in sync with include/gomp-constants.h. */ + acc_device_none = 0, + acc_device_default = 1, + acc_device_host = 2, + /* acc_device_host_nonshm = 3 removed. */ + acc_device_not_host = 4, + acc_device_nvidia = 5, + _ACC_device_hwm, + /* Ensure enumeration is layout compatible with int. 
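+     The two dummy enumerators below pin the range of the enumeration
+     to that of int, so the compiler cannot pick a narrower underlying
+     type.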
*/ + _ACC_highest = __INT_MAX__, + _ACC_neg = -1 +} acc_device_t; + +typedef enum acc_async_t { + /* Keep in sync with include/gomp-constants.h. */ + acc_async_noval = -1, + acc_async_sync = -2 +} acc_async_t; + +int acc_get_num_devices (acc_device_t) __GOACC_NOTHROW; +void acc_set_device_type (acc_device_t) __GOACC_NOTHROW; +acc_device_t acc_get_device_type (void) __GOACC_NOTHROW; +void acc_set_device_num (int, acc_device_t) __GOACC_NOTHROW; +int acc_get_device_num (acc_device_t) __GOACC_NOTHROW; +int acc_async_test (int) __GOACC_NOTHROW; +int acc_async_test_all (void) __GOACC_NOTHROW; +void acc_wait (int) __GOACC_NOTHROW; +void acc_wait_async (int, int) __GOACC_NOTHROW; +void acc_wait_all (void) __GOACC_NOTHROW; +void acc_wait_all_async (int) __GOACC_NOTHROW; +void acc_init (acc_device_t) __GOACC_NOTHROW; +void acc_shutdown (acc_device_t) __GOACC_NOTHROW; +#ifdef __cplusplus +int acc_on_device (int __arg) __GOACC_NOTHROW; +#else +int acc_on_device (acc_device_t __arg) __GOACC_NOTHROW; +#endif +void *acc_malloc (size_t) __GOACC_NOTHROW; +void acc_free (void *) __GOACC_NOTHROW; +/* Some of these would be more correct with const qualifiers, but + the standard specifies otherwise. */ +void *acc_copyin (void *, size_t) __GOACC_NOTHROW; +void *acc_present_or_copyin (void *, size_t) __GOACC_NOTHROW; +void *acc_create (void *, size_t) __GOACC_NOTHROW; +void *acc_present_or_create (void *, size_t) __GOACC_NOTHROW; +void acc_copyout (void *, size_t) __GOACC_NOTHROW; +void acc_delete (void *, size_t) __GOACC_NOTHROW; +void acc_update_device (void *, size_t) __GOACC_NOTHROW; +void acc_update_self (void *, size_t) __GOACC_NOTHROW; +void acc_map_data (void *, void *, size_t) __GOACC_NOTHROW; +void acc_unmap_data (void *) __GOACC_NOTHROW; +void *acc_deviceptr (void *) __GOACC_NOTHROW; +void *acc_hostptr (void *) __GOACC_NOTHROW; +int acc_is_present (void *, size_t) __GOACC_NOTHROW; +void acc_memcpy_to_device (void *, void *, size_t) __GOACC_NOTHROW; +void acc_memcpy_from_device (void *, void *, size_t) __GOACC_NOTHROW; + +/* Old names. OpenACC does not specify whether these can or must + not be macros, inlines or aliases for the new names. */ +#define acc_pcreate acc_present_or_create +#define acc_pcopyin acc_present_or_copyin + +/* CUDA-specific routines. */ +void *acc_get_current_cuda_device (void) __GOACC_NOTHROW; +void *acc_get_current_cuda_context (void) __GOACC_NOTHROW; +void *acc_get_cuda_stream (int) __GOACC_NOTHROW; +int acc_set_cuda_stream (int, void *) __GOACC_NOTHROW; + +#ifdef __cplusplus +} + +/* Forwarding function with correctly typed arg. */ + +#pragma acc routine seq +inline int acc_on_device (acc_device_t __arg) __GOACC_NOTHROW +{ + return acc_on_device ((int) __arg); +} +#endif + +#endif /* _OPENACC_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdalign.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdalign.h new file mode 100644 index 0000000..243d40e --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdalign.h @@ -0,0 +1,39 @@ +/* Copyright (C) 2011-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* ISO C1X: 7.15 Alignment . */ + +#ifndef _STDALIGN_H +#define _STDALIGN_H + +#ifndef __cplusplus + +#define alignas _Alignas +#define alignof _Alignof + +#define __alignas_is_defined 1 +#define __alignof_is_defined 1 + +#endif + +#endif /* stdalign.h */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdarg.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdarg.h new file mode 100644 index 0000000..aa248be --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdarg.h @@ -0,0 +1,127 @@ +/* Copyright (C) 1989-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 7.15 Variable arguments + */ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +typedef __builtin_va_list __gnuc_va_list; +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. */ +#ifdef _STDARG_H + +#define va_start(v,l) __builtin_va_start(v,l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v,l) __builtin_va_arg(v,l) +#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L \ + || __cplusplus + 0 >= 201103L +#define va_copy(d,s) __builtin_va_copy(d,s) +#endif +#define __va_copy(d,s) __builtin_va_copy(d,s) + +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. */ + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST)) +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. 
*/ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#ifdef _SCO_DS +#define __VA_LIST +#endif +#endif /* _VA_LIST_ */ +#else /* not __svr4__ || _SCO_DS */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. + (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. */ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +/* The macro __va_list__ is used by BeOS. */ +#ifndef __va_list__ +typedef __gnuc_va_list va_list; +#endif /* not __va_list__ */ +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif +#ifndef __va_list__ +#define __va_list__ +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdatomic.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdatomic.h new file mode 100644 index 0000000..fb29a14 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdatomic.h @@ -0,0 +1,243 @@ +/* Copyright (C) 2013-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* ISO C11 Standard: 7.17 Atomics . 
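+
+   The header provides the memory_order enumeration, the atomic_*
+   integer typedefs, atomic_flag, and generic operation macros built
+   on top of the GCC __atomic built-in functions.  For example, a
+   release/acquire flag hand-off:
+
+     atomic_int ready = ATOMIC_VAR_INIT (0);
+
+     writer:  atomic_store_explicit (&ready, 1, memory_order_release);
+     reader:  while (!atomic_load_explicit (&ready, memory_order_acquire))
+                continue;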
*/ + +#ifndef _STDATOMIC_H +#define _STDATOMIC_H + +typedef enum + { + memory_order_relaxed = __ATOMIC_RELAXED, + memory_order_consume = __ATOMIC_CONSUME, + memory_order_acquire = __ATOMIC_ACQUIRE, + memory_order_release = __ATOMIC_RELEASE, + memory_order_acq_rel = __ATOMIC_ACQ_REL, + memory_order_seq_cst = __ATOMIC_SEQ_CST + } memory_order; + + +typedef _Atomic _Bool atomic_bool; +typedef _Atomic char atomic_char; +typedef _Atomic signed char atomic_schar; +typedef _Atomic unsigned char atomic_uchar; +typedef _Atomic short atomic_short; +typedef _Atomic unsigned short atomic_ushort; +typedef _Atomic int atomic_int; +typedef _Atomic unsigned int atomic_uint; +typedef _Atomic long atomic_long; +typedef _Atomic unsigned long atomic_ulong; +typedef _Atomic long long atomic_llong; +typedef _Atomic unsigned long long atomic_ullong; +typedef _Atomic __CHAR16_TYPE__ atomic_char16_t; +typedef _Atomic __CHAR32_TYPE__ atomic_char32_t; +typedef _Atomic __WCHAR_TYPE__ atomic_wchar_t; +typedef _Atomic __INT_LEAST8_TYPE__ atomic_int_least8_t; +typedef _Atomic __UINT_LEAST8_TYPE__ atomic_uint_least8_t; +typedef _Atomic __INT_LEAST16_TYPE__ atomic_int_least16_t; +typedef _Atomic __UINT_LEAST16_TYPE__ atomic_uint_least16_t; +typedef _Atomic __INT_LEAST32_TYPE__ atomic_int_least32_t; +typedef _Atomic __UINT_LEAST32_TYPE__ atomic_uint_least32_t; +typedef _Atomic __INT_LEAST64_TYPE__ atomic_int_least64_t; +typedef _Atomic __UINT_LEAST64_TYPE__ atomic_uint_least64_t; +typedef _Atomic __INT_FAST8_TYPE__ atomic_int_fast8_t; +typedef _Atomic __UINT_FAST8_TYPE__ atomic_uint_fast8_t; +typedef _Atomic __INT_FAST16_TYPE__ atomic_int_fast16_t; +typedef _Atomic __UINT_FAST16_TYPE__ atomic_uint_fast16_t; +typedef _Atomic __INT_FAST32_TYPE__ atomic_int_fast32_t; +typedef _Atomic __UINT_FAST32_TYPE__ atomic_uint_fast32_t; +typedef _Atomic __INT_FAST64_TYPE__ atomic_int_fast64_t; +typedef _Atomic __UINT_FAST64_TYPE__ atomic_uint_fast64_t; +typedef _Atomic __INTPTR_TYPE__ atomic_intptr_t; +typedef _Atomic __UINTPTR_TYPE__ atomic_uintptr_t; +typedef _Atomic __SIZE_TYPE__ atomic_size_t; +typedef _Atomic __PTRDIFF_TYPE__ atomic_ptrdiff_t; +typedef _Atomic __INTMAX_TYPE__ atomic_intmax_t; +typedef _Atomic __UINTMAX_TYPE__ atomic_uintmax_t; + + +#define ATOMIC_VAR_INIT(VALUE) (VALUE) + +/* Initialize an atomic object pointed to by PTR with VAL. 
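+   Initialization is not itself an atomic operation; C11 requires that
+   it not happen concurrently with other accesses to the object, which
+   is why a relaxed store suffices.  For example:
+
+     atomic_int counter;
+     atomic_init (&counter, 0);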
*/ +#define atomic_init(PTR, VAL) \ + atomic_store_explicit (PTR, VAL, __ATOMIC_RELAXED) + +#define kill_dependency(Y) \ + __extension__ \ + ({ \ + __auto_type __kill_dependency_tmp = (Y); \ + __kill_dependency_tmp; \ + }) + +extern void atomic_thread_fence (memory_order); +#define atomic_thread_fence(MO) __atomic_thread_fence (MO) +extern void atomic_signal_fence (memory_order); +#define atomic_signal_fence(MO) __atomic_signal_fence (MO) +#define atomic_is_lock_free(OBJ) __atomic_is_lock_free (sizeof (*(OBJ)), (OBJ)) + +#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE +#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE +#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE +#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE +#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE +#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE +#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE +#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE +#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE +#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE + + +/* Note that these macros require __typeof__ and __auto_type to remove + _Atomic qualifiers (and const qualifiers, if those are valid on + macro operands). + + Also note that the header file uses the generic form of __atomic + builtins, which requires the address to be taken of the value + parameter, and then we pass that value on. This allows the macros + to work for any type, and the compiler is smart enough to convert + these to lock-free _N variants if possible, and throw away the + temps. */ + +#define atomic_store_explicit(PTR, VAL, MO) \ + __extension__ \ + ({ \ + __auto_type __atomic_store_ptr = (PTR); \ + __typeof__ (*__atomic_store_ptr) __atomic_store_tmp = (VAL); \ + __atomic_store (__atomic_store_ptr, &__atomic_store_tmp, (MO)); \ + }) + +#define atomic_store(PTR, VAL) \ + atomic_store_explicit (PTR, VAL, __ATOMIC_SEQ_CST) + + +#define atomic_load_explicit(PTR, MO) \ + __extension__ \ + ({ \ + __auto_type __atomic_load_ptr = (PTR); \ + __typeof__ (*__atomic_load_ptr) __atomic_load_tmp; \ + __atomic_load (__atomic_load_ptr, &__atomic_load_tmp, (MO)); \ + __atomic_load_tmp; \ + }) + +#define atomic_load(PTR) atomic_load_explicit (PTR, __ATOMIC_SEQ_CST) + + +#define atomic_exchange_explicit(PTR, VAL, MO) \ + __extension__ \ + ({ \ + __auto_type __atomic_exchange_ptr = (PTR); \ + __typeof__ (*__atomic_exchange_ptr) __atomic_exchange_val = (VAL); \ + __typeof__ (*__atomic_exchange_ptr) __atomic_exchange_tmp; \ + __atomic_exchange (__atomic_exchange_ptr, &__atomic_exchange_val, \ + &__atomic_exchange_tmp, (MO)); \ + __atomic_exchange_tmp; \ + }) + +#define atomic_exchange(PTR, VAL) \ + atomic_exchange_explicit (PTR, VAL, __ATOMIC_SEQ_CST) + + +#define atomic_compare_exchange_strong_explicit(PTR, VAL, DES, SUC, FAIL) \ + __extension__ \ + ({ \ + __auto_type __atomic_compare_exchange_ptr = (PTR); \ + __typeof__ (*__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \ + = (DES); \ + __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \ + &__atomic_compare_exchange_tmp, 0, \ + (SUC), (FAIL)); \ + }) + +#define atomic_compare_exchange_strong(PTR, VAL, DES) \ + atomic_compare_exchange_strong_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \ + __ATOMIC_SEQ_CST) + +#define atomic_compare_exchange_weak_explicit(PTR, VAL, DES, SUC, FAIL) \ + __extension__ \ + ({ \ + __auto_type __atomic_compare_exchange_ptr = (PTR); \ + __typeof__ 
(*__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \ + = (DES); \ + __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \ + &__atomic_compare_exchange_tmp, 1, \ + (SUC), (FAIL)); \ + }) + +#define atomic_compare_exchange_weak(PTR, VAL, DES) \ + atomic_compare_exchange_weak_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \ + __ATOMIC_SEQ_CST) + + + +#define atomic_fetch_add(PTR, VAL) __atomic_fetch_add ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_add_explicit(PTR, VAL, MO) \ + __atomic_fetch_add ((PTR), (VAL), (MO)) + +#define atomic_fetch_sub(PTR, VAL) __atomic_fetch_sub ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_sub_explicit(PTR, VAL, MO) \ + __atomic_fetch_sub ((PTR), (VAL), (MO)) + +#define atomic_fetch_or(PTR, VAL) __atomic_fetch_or ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_or_explicit(PTR, VAL, MO) \ + __atomic_fetch_or ((PTR), (VAL), (MO)) + +#define atomic_fetch_xor(PTR, VAL) __atomic_fetch_xor ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_xor_explicit(PTR, VAL, MO) \ + __atomic_fetch_xor ((PTR), (VAL), (MO)) + +#define atomic_fetch_and(PTR, VAL) __atomic_fetch_and ((PTR), (VAL), \ + __ATOMIC_SEQ_CST) +#define atomic_fetch_and_explicit(PTR, VAL, MO) \ + __atomic_fetch_and ((PTR), (VAL), (MO)) + + +typedef _Atomic struct +{ +#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1 + _Bool __val; +#else + unsigned char __val; +#endif +} atomic_flag; + +#define ATOMIC_FLAG_INIT { 0 } + + +extern _Bool atomic_flag_test_and_set (volatile atomic_flag *); +#define atomic_flag_test_and_set(PTR) \ + __atomic_test_and_set ((PTR), __ATOMIC_SEQ_CST) +extern _Bool atomic_flag_test_and_set_explicit (volatile atomic_flag *, + memory_order); +#define atomic_flag_test_and_set_explicit(PTR, MO) \ + __atomic_test_and_set ((PTR), (MO)) + +extern void atomic_flag_clear (volatile atomic_flag *); +#define atomic_flag_clear(PTR) __atomic_clear ((PTR), __ATOMIC_SEQ_CST) +extern void atomic_flag_clear_explicit (volatile atomic_flag *, memory_order); +#define atomic_flag_clear_explicit(PTR, MO) __atomic_clear ((PTR), (MO)) + +#endif /* _STDATOMIC_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdbool.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdbool.h new file mode 100644 index 0000000..a69fc3a --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdbool.h @@ -0,0 +1,54 @@ +/* Copyright (C) 1998-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. 
*/ + +/* + * ISO C Standard: 7.16 Boolean type and values + */ + +#ifndef _STDBOOL_H +#define _STDBOOL_H + +#ifndef __cplusplus + +#define bool _Bool +#define true 1 +#define false 0 + +#else /* __cplusplus */ + +/* Supporting _Bool in C++ is a GCC extension. */ +#define _Bool bool + +#if __cplusplus < 201103L +/* Defining these macros in C++98 is a GCC extension. */ +#define bool bool +#define false false +#define true true +#endif + +#endif /* __cplusplus */ + +/* Signal that all the definitions are present. */ +#define __bool_true_false_are_defined 1 + +#endif /* stdbool.h */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stddef.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stddef.h new file mode 100644 index 0000000..872f451 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stddef.h @@ -0,0 +1,451 @@ +/* Copyright (C) 1989-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* + * ISO C Standard: 7.17 Common definitions + */ +#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \ + && !defined(__STDDEF_H__)) \ + || defined(__need_wchar_t) || defined(__need_size_t) \ + || defined(__need_ptrdiff_t) || defined(__need_NULL) \ + || defined(__need_wint_t) + +/* Any one of these symbols __need_* means that GNU libc + wants us just to define one data type. So don't define + the symbols that indicate this file's entire job has been done. */ +#if (!defined(__need_wchar_t) && !defined(__need_size_t) \ + && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \ + && !defined(__need_wint_t)) +#define _STDDEF_H +#define _STDDEF_H_ +/* snaroff@next.com says the NeXT needs this. */ +#define _ANSI_STDDEF_H +#endif + +#ifndef __sys_stdtypes_h +/* This avoids lossage on SunOS but only if stdtypes.h comes first. + There's no way to win with the other order! Sun lossage. */ + +/* On 4.3bsd-net2, make sure ansi.h is included, so we have + one less case to deal with in the following. */ +#if defined (__BSD_NET2__) || defined (____386BSD____) || (defined (__FreeBSD__) && (__FreeBSD__ < 5)) || defined(__NetBSD__) +#include +#endif +/* On FreeBSD 5, machine/ansi.h does not exist anymore... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#include +#endif + +/* In 4.3bsd-net2, machine/ansi.h defines these symbols, which are + defined if the corresponding type is *not* defined. + FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_. 
+ NetBSD defines _I386_ANSI_H_ and _X86_64_ANSI_H_ instead of _ANSI_H_ */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) +#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_) +#define _SIZE_T +#endif +#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_) +#define _PTRDIFF_T +#endif +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_. */ +#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_) +#ifndef _BSD_WCHAR_T_ +#define _WCHAR_T +#endif +#endif +/* Undef _FOO_T_ if we are supposed to define foo_t. */ +#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_) +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#if defined (__need_size_t) || defined (_STDDEF_H_) +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#if defined (__need_wchar_t) || defined (_STDDEF_H_) +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) */ + +/* Sequent's header files use _PTRDIFF_T_ in some conflicting way. + Just ignore it. */ +#if defined (__sequent__) && defined (_PTRDIFF_T_) +#undef _PTRDIFF_T_ +#endif + +/* On VxWorks, may have defined macros like + _TYPE_size_t which will typedef size_t. fixincludes patched the + vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is + not defined, and so that defining this macro defines _GCC_SIZE_T. + If we find that the macros are still defined at this point, we must + invoke them so that the type is defined as expected. */ +#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_)) +_TYPE_ptrdiff_t; +#undef _TYPE_ptrdiff_t +#endif +#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_)) +_TYPE_size_t; +#undef _TYPE_size_t +#endif +#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_)) +_TYPE_wchar_t; +#undef _TYPE_wchar_t +#endif + +/* In case nobody has defined these types, but we aren't running under + GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and + __WCHAR_TYPE__ have reasonable values. This can happen if the + parts of GCC is compiled by an older compiler, that actually + include gstddef.h, such as collect2. */ + +/* Signed type of difference of two pointers. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_ptrdiff_t) +#ifndef _PTRDIFF_T /* in case has defined it. */ +#ifndef _T_PTRDIFF_ +#ifndef _T_PTRDIFF +#ifndef __PTRDIFF_T +#ifndef _PTRDIFF_T_ +#ifndef _BSD_PTRDIFF_T_ +#ifndef ___int_ptrdiff_t_h +#ifndef _GCC_PTRDIFF_T +#ifndef _PTRDIFF_T_DECLARED /* DragonFly */ +#define _PTRDIFF_T +#define _T_PTRDIFF_ +#define _T_PTRDIFF +#define __PTRDIFF_T +#define _PTRDIFF_T_ +#define _BSD_PTRDIFF_T_ +#define ___int_ptrdiff_t_h +#define _GCC_PTRDIFF_T +#define _PTRDIFF_T_DECLARED +#ifndef __PTRDIFF_TYPE__ +#define __PTRDIFF_TYPE__ long int +#endif +typedef __PTRDIFF_TYPE__ ptrdiff_t; +#endif /* _PTRDIFF_T_DECLARED */ +#endif /* _GCC_PTRDIFF_T */ +#endif /* ___int_ptrdiff_t_h */ +#endif /* _BSD_PTRDIFF_T_ */ +#endif /* _PTRDIFF_T_ */ +#endif /* __PTRDIFF_T */ +#endif /* _T_PTRDIFF */ +#endif /* _T_PTRDIFF_ */ +#endif /* _PTRDIFF_T */ + +/* If this symbol has done its job, get rid of it. */ +#undef __need_ptrdiff_t + +#endif /* _STDDEF_H or __need_ptrdiff_t. */ + +/* Unsigned type of `sizeof' something. 
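+   This is the result type of the sizeof operator; it must be able to
+   represent the size of the largest object the implementation
+   supports.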
*/ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_size_t) +#ifndef __size_t__ /* BeOS */ +#ifndef __SIZE_T__ /* Cray Unicos/Mk */ +#ifndef _SIZE_T /* in case has defined it. */ +#ifndef _SYS_SIZE_T_H +#ifndef _T_SIZE_ +#ifndef _T_SIZE +#ifndef __SIZE_T +#ifndef _SIZE_T_ +#ifndef _BSD_SIZE_T_ +#ifndef _SIZE_T_DEFINED_ +#ifndef _SIZE_T_DEFINED +#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */ +#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */ +#ifndef ___int_size_t_h +#ifndef _GCC_SIZE_T +#ifndef _SIZET_ +#ifndef __size_t +#define __size_t__ /* BeOS */ +#define __SIZE_T__ /* Cray Unicos/Mk */ +#define _SIZE_T +#define _SYS_SIZE_T_H +#define _T_SIZE_ +#define _T_SIZE +#define __SIZE_T +#define _SIZE_T_ +#define _BSD_SIZE_T_ +#define _SIZE_T_DEFINED_ +#define _SIZE_T_DEFINED +#define _BSD_SIZE_T_DEFINED_ /* Darwin */ +#define _SIZE_T_DECLARED /* FreeBSD 5 */ +#define ___int_size_t_h +#define _GCC_SIZE_T +#define _SIZET_ +#if (defined (__FreeBSD__) && (__FreeBSD__ >= 5)) \ + || defined(__DragonFly__) \ + || defined(__FreeBSD_kernel__) +/* __size_t is a typedef on FreeBSD 5, must not trash it. */ +#elif defined (__VMS__) +/* __size_t is also a typedef on VMS. */ +#else +#define __size_t +#endif +#ifndef __SIZE_TYPE__ +#define __SIZE_TYPE__ long unsigned int +#endif +#if !(defined (__GNUG__) && defined (size_t)) +typedef __SIZE_TYPE__ size_t; +#ifdef __BEOS__ +typedef long ssize_t; +#endif /* __BEOS__ */ +#endif /* !(defined (__GNUG__) && defined (size_t)) */ +#endif /* __size_t */ +#endif /* _SIZET_ */ +#endif /* _GCC_SIZE_T */ +#endif /* ___int_size_t_h */ +#endif /* _SIZE_T_DECLARED */ +#endif /* _BSD_SIZE_T_DEFINED_ */ +#endif /* _SIZE_T_DEFINED */ +#endif /* _SIZE_T_DEFINED_ */ +#endif /* _BSD_SIZE_T_ */ +#endif /* _SIZE_T_ */ +#endif /* __SIZE_T */ +#endif /* _T_SIZE */ +#endif /* _T_SIZE_ */ +#endif /* _SYS_SIZE_T_H */ +#endif /* _SIZE_T */ +#endif /* __SIZE_T__ */ +#endif /* __size_t__ */ +#undef __need_size_t +#endif /* _STDDEF_H or __need_size_t. */ + + +/* Wide character type. + Locale-writers should change this as necessary to + be big enough to hold unique values not between 0 and 127, + and not (wchar_t) -1, for each defined multibyte character. */ + +/* Define this type if we are doing the whole job, + or if we want this type in particular. */ +#if defined (_STDDEF_H) || defined (__need_wchar_t) +#ifndef __wchar_t__ /* BeOS */ +#ifndef __WCHAR_T__ /* Cray Unicos/Mk */ +#ifndef _WCHAR_T +#ifndef _T_WCHAR_ +#ifndef _T_WCHAR +#ifndef __WCHAR_T +#ifndef _WCHAR_T_ +#ifndef _BSD_WCHAR_T_ +#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */ +#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */ +#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */ +#ifndef _WCHAR_T_DEFINED_ +#ifndef _WCHAR_T_DEFINED +#ifndef _WCHAR_T_H +#ifndef ___int_wchar_t_h +#ifndef __INT_WCHAR_T_H +#ifndef _GCC_WCHAR_T +#define __wchar_t__ /* BeOS */ +#define __WCHAR_T__ /* Cray Unicos/Mk */ +#define _WCHAR_T +#define _T_WCHAR_ +#define _T_WCHAR +#define __WCHAR_T +#define _WCHAR_T_ +#define _BSD_WCHAR_T_ +#define _WCHAR_T_DEFINED_ +#define _WCHAR_T_DEFINED +#define _WCHAR_T_H +#define ___int_wchar_t_h +#define __INT_WCHAR_T_H +#define _GCC_WCHAR_T +#define _WCHAR_T_DECLARED + +/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other + symbols in the _FOO_T_ family, stays defined even after its + corresponding type is defined). 
If we define wchar_t, then we + must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if + we undef _WCHAR_T_, then we must also define rune_t, since + headers like runetype.h assume that if machine/ansi.h is included, + and _BSD_WCHAR_T_ is not defined, then rune_t is available. + machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of + the same type." */ +#ifdef _BSD_WCHAR_T_ +#undef _BSD_WCHAR_T_ +#ifdef _BSD_RUNE_T_ +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +typedef _BSD_RUNE_T_ rune_t; +#define _BSD_WCHAR_T_DEFINED_ +#define _BSD_RUNE_T_DEFINED_ /* Darwin */ +#if defined (__FreeBSD__) && (__FreeBSD__ < 5) +/* Why is this file so hard to maintain properly? In contrast to + the comment above regarding BSD/386 1.1, on FreeBSD for as long + as the symbol has existed, _BSD_RUNE_T_ must not stay defined or + redundant typedefs will occur when stdlib.h is included after this file. */ +#undef _BSD_RUNE_T_ +#endif +#endif +#endif +#endif +/* FreeBSD 5 can't be handled well using "traditional" logic above + since it no longer defines _BSD_RUNE_T_ yet still desires to export + rune_t in some cases... */ +#if defined (__FreeBSD__) && (__FreeBSD__ >= 5) +#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE) +#if __BSD_VISIBLE +#ifndef _RUNE_T_DECLARED +typedef __rune_t rune_t; +#define _RUNE_T_DECLARED +#endif +#endif +#endif +#endif + +#ifndef __WCHAR_TYPE__ +#define __WCHAR_TYPE__ int +#endif +#ifndef __cplusplus +typedef __WCHAR_TYPE__ wchar_t; +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* _WCHAR_T_DECLARED */ +#endif /* _BSD_RUNE_T_DEFINED_ */ +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif /* __WCHAR_T__ */ +#endif /* __wchar_t__ */ +#undef __need_wchar_t +#endif /* _STDDEF_H or __need_wchar_t. */ + +#if defined (__need_wint_t) +#ifndef _WINT_T +#define _WINT_T + +#ifndef __WINT_TYPE__ +#define __WINT_TYPE__ unsigned int +#endif +typedef __WINT_TYPE__ wint_t; +#endif +#undef __need_wint_t +#endif + +/* In 4.3bsd-net2, leave these undefined to indicate that size_t, etc. + are already defined. */ +/* BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here. */ +/* NetBSD 5 requires the I386_ANSI_H and X86_64_ANSI_H checks here. */ +#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) +/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_ + are probably typos and should be removed before 2.8 is released. */ +#ifdef _GCC_PTRDIFF_T_ +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T_ +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T_ +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +/* The following ones are the real ones. */ +#ifdef _GCC_PTRDIFF_T +#undef _PTRDIFF_T_ +#undef _BSD_PTRDIFF_T_ +#endif +#ifdef _GCC_SIZE_T +#undef _SIZE_T_ +#undef _BSD_SIZE_T_ +#endif +#ifdef _GCC_WCHAR_T +#undef _WCHAR_T_ +#undef _BSD_WCHAR_T_ +#endif +#endif /* _ANSI_H_ || _MACHINE_ANSI_H_ || _X86_64_ANSI_H_ || _I386_ANSI_H_ */ + +#endif /* __sys_stdtypes_h */ + +/* A null pointer constant. */ + +#if defined (_STDDEF_H) || defined (__need_NULL) +#undef NULL /* in case has defined it. */ +#ifdef __GNUG__ +#define NULL __null +#else /* G++ */ +#ifndef __cplusplus +#define NULL ((void *)0) +#else /* C++ */ +#define NULL 0 +#endif /* C++ */ +#endif /* G++ */ +#endif /* NULL not defined and or need NULL. */ +#undef __need_NULL + +#ifdef _STDDEF_H + +/* Offset of member MEMBER in a struct of type TYPE. 
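+   The result is an integer constant expression of type size_t.  For
+   example, given
+
+     struct point { int x; int y; };
+
+   offsetof (struct point, y) evaluates to 4 here, since int occupies
+   four bytes on this target.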
*/ +#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) + +#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \ + || (defined(__cplusplus) && __cplusplus >= 201103L) +#ifndef _GCC_MAX_ALIGN_T +#define _GCC_MAX_ALIGN_T +/* Type whose alignment is supported in every context and is at least + as great as that of any standard type not using alignment + specifiers. */ +typedef struct { + long long __max_align_ll __attribute__((__aligned__(__alignof__(long long)))); + long double __max_align_ld __attribute__((__aligned__(__alignof__(long double)))); + /* _Float128 is defined as a basic type, so max_align_t must be + sufficiently aligned for it. This code must work in C++, so we + use __float128 here; that is only available on some + architectures, but only on i386 is extra alignment needed for + __float128. */ +#ifdef __i386__ + __float128 __max_align_f128 __attribute__((__aligned__(__alignof(__float128)))); +#endif +} max_align_t; +#endif +#endif /* C11 or C++11. */ + +#if defined(__cplusplus) && __cplusplus >= 201103L +#ifndef _GXX_NULLPTR_T +#define _GXX_NULLPTR_T + typedef decltype(nullptr) nullptr_t; +#endif +#endif /* C++11. */ + +#endif /* _STDDEF_H was defined this time */ + +#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__ + || __need_XXX was not defined before */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdfix.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdfix.h new file mode 100644 index 0000000..9de1b72 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdfix.h @@ -0,0 +1,204 @@ +/* Copyright (C) 2007-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. */ + +/* ISO/IEC JTC1 SC22 WG14 N1169 + * Date: 2006-04-04 + * ISO/IEC TR 18037 + * Programming languages - C - Extensions to support embedded processors + */ + +#ifndef _STDFIX_H +#define _STDFIX_H + +/* 7.18a.1 Introduction. */ + +#undef fract +#undef accum +#undef sat +#define fract _Fract +#define accum _Accum +#define sat _Sat + +/* 7.18a.3 Precision macros. */ + +#undef SFRACT_FBIT +#undef SFRACT_MIN +#undef SFRACT_MAX +#undef SFRACT_EPSILON +#define SFRACT_FBIT __SFRACT_FBIT__ +#define SFRACT_MIN __SFRACT_MIN__ +#define SFRACT_MAX __SFRACT_MAX__ +#define SFRACT_EPSILON __SFRACT_EPSILON__ + +#undef USFRACT_FBIT +#undef USFRACT_MIN +#undef USFRACT_MAX +#undef USFRACT_EPSILON +#define USFRACT_FBIT __USFRACT_FBIT__ +#define USFRACT_MIN __USFRACT_MIN__ /* GCC extension. 
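+                                       (TR 18037 defines no minimum
+                                       macros for the unsigned
+                                       fixed-point types, whose least
+                                       value is simply 0; hence the
+                                       extension markers.)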
*/ +#define USFRACT_MAX __USFRACT_MAX__ +#define USFRACT_EPSILON __USFRACT_EPSILON__ + +#undef FRACT_FBIT +#undef FRACT_MIN +#undef FRACT_MAX +#undef FRACT_EPSILON +#define FRACT_FBIT __FRACT_FBIT__ +#define FRACT_MIN __FRACT_MIN__ +#define FRACT_MAX __FRACT_MAX__ +#define FRACT_EPSILON __FRACT_EPSILON__ + +#undef UFRACT_FBIT +#undef UFRACT_MIN +#undef UFRACT_MAX +#undef UFRACT_EPSILON +#define UFRACT_FBIT __UFRACT_FBIT__ +#define UFRACT_MIN __UFRACT_MIN__ /* GCC extension. */ +#define UFRACT_MAX __UFRACT_MAX__ +#define UFRACT_EPSILON __UFRACT_EPSILON__ + +#undef LFRACT_FBIT +#undef LFRACT_MIN +#undef LFRACT_MAX +#undef LFRACT_EPSILON +#define LFRACT_FBIT __LFRACT_FBIT__ +#define LFRACT_MIN __LFRACT_MIN__ +#define LFRACT_MAX __LFRACT_MAX__ +#define LFRACT_EPSILON __LFRACT_EPSILON__ + +#undef ULFRACT_FBIT +#undef ULFRACT_MIN +#undef ULFRACT_MAX +#undef ULFRACT_EPSILON +#define ULFRACT_FBIT __ULFRACT_FBIT__ +#define ULFRACT_MIN __ULFRACT_MIN__ /* GCC extension. */ +#define ULFRACT_MAX __ULFRACT_MAX__ +#define ULFRACT_EPSILON __ULFRACT_EPSILON__ + +#undef LLFRACT_FBIT +#undef LLFRACT_MIN +#undef LLFRACT_MAX +#undef LLFRACT_EPSILON +#define LLFRACT_FBIT __LLFRACT_FBIT__ /* GCC extension. */ +#define LLFRACT_MIN __LLFRACT_MIN__ /* GCC extension. */ +#define LLFRACT_MAX __LLFRACT_MAX__ /* GCC extension. */ +#define LLFRACT_EPSILON __LLFRACT_EPSILON__ /* GCC extension. */ + +#undef ULLFRACT_FBIT +#undef ULLFRACT_MIN +#undef ULLFRACT_MAX +#undef ULLFRACT_EPSILON +#define ULLFRACT_FBIT __ULLFRACT_FBIT__ /* GCC extension. */ +#define ULLFRACT_MIN __ULLFRACT_MIN__ /* GCC extension. */ +#define ULLFRACT_MAX __ULLFRACT_MAX__ /* GCC extension. */ +#define ULLFRACT_EPSILON __ULLFRACT_EPSILON__ /* GCC extension. */ + +#undef SACCUM_FBIT +#undef SACCUM_IBIT +#undef SACCUM_MIN +#undef SACCUM_MAX +#undef SACCUM_EPSILON +#define SACCUM_FBIT __SACCUM_FBIT__ +#define SACCUM_IBIT __SACCUM_IBIT__ +#define SACCUM_MIN __SACCUM_MIN__ +#define SACCUM_MAX __SACCUM_MAX__ +#define SACCUM_EPSILON __SACCUM_EPSILON__ + +#undef USACCUM_FBIT +#undef USACCUM_IBIT +#undef USACCUM_MIN +#undef USACCUM_MAX +#undef USACCUM_EPSILON +#define USACCUM_FBIT __USACCUM_FBIT__ +#define USACCUM_IBIT __USACCUM_IBIT__ +#define USACCUM_MIN __USACCUM_MIN__ /* GCC extension. */ +#define USACCUM_MAX __USACCUM_MAX__ +#define USACCUM_EPSILON __USACCUM_EPSILON__ + +#undef ACCUM_FBIT +#undef ACCUM_IBIT +#undef ACCUM_MIN +#undef ACCUM_MAX +#undef ACCUM_EPSILON +#define ACCUM_FBIT __ACCUM_FBIT__ +#define ACCUM_IBIT __ACCUM_IBIT__ +#define ACCUM_MIN __ACCUM_MIN__ +#define ACCUM_MAX __ACCUM_MAX__ +#define ACCUM_EPSILON __ACCUM_EPSILON__ + +#undef UACCUM_FBIT +#undef UACCUM_IBIT +#undef UACCUM_MIN +#undef UACCUM_MAX +#undef UACCUM_EPSILON +#define UACCUM_FBIT __UACCUM_FBIT__ +#define UACCUM_IBIT __UACCUM_IBIT__ +#define UACCUM_MIN __UACCUM_MIN__ /* GCC extension. */ +#define UACCUM_MAX __UACCUM_MAX__ +#define UACCUM_EPSILON __UACCUM_EPSILON__ + +#undef LACCUM_FBIT +#undef LACCUM_IBIT +#undef LACCUM_MIN +#undef LACCUM_MAX +#undef LACCUM_EPSILON +#define LACCUM_FBIT __LACCUM_FBIT__ +#define LACCUM_IBIT __LACCUM_IBIT__ +#define LACCUM_MIN __LACCUM_MIN__ +#define LACCUM_MAX __LACCUM_MAX__ +#define LACCUM_EPSILON __LACCUM_EPSILON__ + +#undef ULACCUM_FBIT +#undef ULACCUM_IBIT +#undef ULACCUM_MIN +#undef ULACCUM_MAX +#undef ULACCUM_EPSILON +#define ULACCUM_FBIT __ULACCUM_FBIT__ +#define ULACCUM_IBIT __ULACCUM_IBIT__ +#define ULACCUM_MIN __ULACCUM_MIN__ /* GCC extension. 
*/ +#define ULACCUM_MAX __ULACCUM_MAX__ +#define ULACCUM_EPSILON __ULACCUM_EPSILON__ + +#undef LLACCUM_FBIT +#undef LLACCUM_IBIT +#undef LLACCUM_MIN +#undef LLACCUM_MAX +#undef LLACCUM_EPSILON +#define LLACCUM_FBIT __LLACCUM_FBIT__ /* GCC extension. */ +#define LLACCUM_IBIT __LLACCUM_IBIT__ /* GCC extension. */ +#define LLACCUM_MIN __LLACCUM_MIN__ /* GCC extension. */ +#define LLACCUM_MAX __LLACCUM_MAX__ /* GCC extension. */ +#define LLACCUM_EPSILON __LLACCUM_EPSILON__ /* GCC extension. */ + +#undef ULLACCUM_FBIT +#undef ULLACCUM_IBIT +#undef ULLACCUM_MIN +#undef ULLACCUM_MAX +#undef ULLACCUM_EPSILON +#define ULLACCUM_FBIT __ULLACCUM_FBIT__ /* GCC extension. */ +#define ULLACCUM_IBIT __ULLACCUM_IBIT__ /* GCC extension. */ +#define ULLACCUM_MIN __ULLACCUM_MIN__ /* GCC extension. */ +#define ULLACCUM_MAX __ULLACCUM_MAX__ /* GCC extension. */ +#define ULLACCUM_EPSILON __ULLACCUM_EPSILON__ /* GCC extension. */ + +#endif /* _STDFIX_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdint-gcc.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdint-gcc.h new file mode 100644 index 0000000..0ee7e35 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdint-gcc.h @@ -0,0 +1,364 @@ +/* Copyright (C) 2008-2017 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. 
*/ + +/* + * ISO C Standard: 7.18 Integer types + */ + +#ifndef _GCC_STDINT_H +#define _GCC_STDINT_H + +/* 7.8.1.1 Exact-width integer types */ + +#ifdef __INT8_TYPE__ +typedef __INT8_TYPE__ int8_t; +#endif +#ifdef __INT16_TYPE__ +typedef __INT16_TYPE__ int16_t; +#endif +#ifdef __INT32_TYPE__ +typedef __INT32_TYPE__ int32_t; +#endif +#ifdef __INT64_TYPE__ +typedef __INT64_TYPE__ int64_t; +#endif +#ifdef __UINT8_TYPE__ +typedef __UINT8_TYPE__ uint8_t; +#endif +#ifdef __UINT16_TYPE__ +typedef __UINT16_TYPE__ uint16_t; +#endif +#ifdef __UINT32_TYPE__ +typedef __UINT32_TYPE__ uint32_t; +#endif +#ifdef __UINT64_TYPE__ +typedef __UINT64_TYPE__ uint64_t; +#endif + +/* 7.8.1.2 Minimum-width integer types */ + +typedef __INT_LEAST8_TYPE__ int_least8_t; +typedef __INT_LEAST16_TYPE__ int_least16_t; +typedef __INT_LEAST32_TYPE__ int_least32_t; +typedef __INT_LEAST64_TYPE__ int_least64_t; +typedef __UINT_LEAST8_TYPE__ uint_least8_t; +typedef __UINT_LEAST16_TYPE__ uint_least16_t; +typedef __UINT_LEAST32_TYPE__ uint_least32_t; +typedef __UINT_LEAST64_TYPE__ uint_least64_t; + +/* 7.8.1.3 Fastest minimum-width integer types */ + +typedef __INT_FAST8_TYPE__ int_fast8_t; +typedef __INT_FAST16_TYPE__ int_fast16_t; +typedef __INT_FAST32_TYPE__ int_fast32_t; +typedef __INT_FAST64_TYPE__ int_fast64_t; +typedef __UINT_FAST8_TYPE__ uint_fast8_t; +typedef __UINT_FAST16_TYPE__ uint_fast16_t; +typedef __UINT_FAST32_TYPE__ uint_fast32_t; +typedef __UINT_FAST64_TYPE__ uint_fast64_t; + +/* 7.8.1.4 Integer types capable of holding object pointers */ + +#ifdef __INTPTR_TYPE__ +typedef __INTPTR_TYPE__ intptr_t; +#endif +#ifdef __UINTPTR_TYPE__ +typedef __UINTPTR_TYPE__ uintptr_t; +#endif + +/* 7.8.1.5 Greatest-width integer types */ + +typedef __INTMAX_TYPE__ intmax_t; +typedef __UINTMAX_TYPE__ uintmax_t; + +#if (!defined __cplusplus || __cplusplus >= 201103L \ + || defined __STDC_LIMIT_MACROS) + +/* 7.18.2 Limits of specified-width integer types */ + +#ifdef __INT8_MAX__ +# undef INT8_MAX +# define INT8_MAX __INT8_MAX__ +# undef INT8_MIN +# define INT8_MIN (-INT8_MAX - 1) +#endif +#ifdef __UINT8_MAX__ +# undef UINT8_MAX +# define UINT8_MAX __UINT8_MAX__ +#endif +#ifdef __INT16_MAX__ +# undef INT16_MAX +# define INT16_MAX __INT16_MAX__ +# undef INT16_MIN +# define INT16_MIN (-INT16_MAX - 1) +#endif +#ifdef __UINT16_MAX__ +# undef UINT16_MAX +# define UINT16_MAX __UINT16_MAX__ +#endif +#ifdef __INT32_MAX__ +# undef INT32_MAX +# define INT32_MAX __INT32_MAX__ +# undef INT32_MIN +# define INT32_MIN (-INT32_MAX - 1) +#endif +#ifdef __UINT32_MAX__ +# undef UINT32_MAX +# define UINT32_MAX __UINT32_MAX__ +#endif +#ifdef __INT64_MAX__ +# undef INT64_MAX +# define INT64_MAX __INT64_MAX__ +# undef INT64_MIN +# define INT64_MIN (-INT64_MAX - 1) +#endif +#ifdef __UINT64_MAX__ +# undef UINT64_MAX +# define UINT64_MAX __UINT64_MAX__ +#endif + +#undef INT_LEAST8_MAX +#define INT_LEAST8_MAX __INT_LEAST8_MAX__ +#undef INT_LEAST8_MIN +#define INT_LEAST8_MIN (-INT_LEAST8_MAX - 1) +#undef UINT_LEAST8_MAX +#define UINT_LEAST8_MAX __UINT_LEAST8_MAX__ +#undef INT_LEAST16_MAX +#define INT_LEAST16_MAX __INT_LEAST16_MAX__ +#undef INT_LEAST16_MIN +#define INT_LEAST16_MIN (-INT_LEAST16_MAX - 1) +#undef UINT_LEAST16_MAX +#define UINT_LEAST16_MAX __UINT_LEAST16_MAX__ +#undef INT_LEAST32_MAX +#define INT_LEAST32_MAX __INT_LEAST32_MAX__ +#undef INT_LEAST32_MIN +#define INT_LEAST32_MIN (-INT_LEAST32_MAX - 1) +#undef UINT_LEAST32_MAX +#define UINT_LEAST32_MAX __UINT_LEAST32_MAX__ +#undef INT_LEAST64_MAX +#define INT_LEAST64_MAX __INT_LEAST64_MAX__ +#undef 
INT_LEAST64_MIN +#define INT_LEAST64_MIN (-INT_LEAST64_MAX - 1) +#undef UINT_LEAST64_MAX +#define UINT_LEAST64_MAX __UINT_LEAST64_MAX__ + +#undef INT_FAST8_MAX +#define INT_FAST8_MAX __INT_FAST8_MAX__ +#undef INT_FAST8_MIN +#define INT_FAST8_MIN (-INT_FAST8_MAX - 1) +#undef UINT_FAST8_MAX +#define UINT_FAST8_MAX __UINT_FAST8_MAX__ +#undef INT_FAST16_MAX +#define INT_FAST16_MAX __INT_FAST16_MAX__ +#undef INT_FAST16_MIN +#define INT_FAST16_MIN (-INT_FAST16_MAX - 1) +#undef UINT_FAST16_MAX +#define UINT_FAST16_MAX __UINT_FAST16_MAX__ +#undef INT_FAST32_MAX +#define INT_FAST32_MAX __INT_FAST32_MAX__ +#undef INT_FAST32_MIN +#define INT_FAST32_MIN (-INT_FAST32_MAX - 1) +#undef UINT_FAST32_MAX +#define UINT_FAST32_MAX __UINT_FAST32_MAX__ +#undef INT_FAST64_MAX +#define INT_FAST64_MAX __INT_FAST64_MAX__ +#undef INT_FAST64_MIN +#define INT_FAST64_MIN (-INT_FAST64_MAX - 1) +#undef UINT_FAST64_MAX +#define UINT_FAST64_MAX __UINT_FAST64_MAX__ + +#ifdef __INTPTR_MAX__ +# undef INTPTR_MAX +# define INTPTR_MAX __INTPTR_MAX__ +# undef INTPTR_MIN +# define INTPTR_MIN (-INTPTR_MAX - 1) +#endif +#ifdef __UINTPTR_MAX__ +# undef UINTPTR_MAX +# define UINTPTR_MAX __UINTPTR_MAX__ +#endif + +#undef INTMAX_MAX +#define INTMAX_MAX __INTMAX_MAX__ +#undef INTMAX_MIN +#define INTMAX_MIN (-INTMAX_MAX - 1) +#undef UINTMAX_MAX +#define UINTMAX_MAX __UINTMAX_MAX__ + +/* 7.18.3 Limits of other integer types */ + +#undef PTRDIFF_MAX +#define PTRDIFF_MAX __PTRDIFF_MAX__ +#undef PTRDIFF_MIN +#define PTRDIFF_MIN (-PTRDIFF_MAX - 1) + +#undef SIG_ATOMIC_MAX +#define SIG_ATOMIC_MAX __SIG_ATOMIC_MAX__ +#undef SIG_ATOMIC_MIN +#define SIG_ATOMIC_MIN __SIG_ATOMIC_MIN__ + +#undef SIZE_MAX +#define SIZE_MAX __SIZE_MAX__ + +#undef WCHAR_MAX +#define WCHAR_MAX __WCHAR_MAX__ +#undef WCHAR_MIN +#define WCHAR_MIN __WCHAR_MIN__ + +#undef WINT_MAX +#define WINT_MAX __WINT_MAX__ +#undef WINT_MIN +#define WINT_MIN __WINT_MIN__ + +#endif /* (!defined __cplusplus || __cplusplus >= 201103L + || defined __STDC_LIMIT_MACROS) */ + +#if (!defined __cplusplus || __cplusplus >= 201103L \ + || defined __STDC_CONSTANT_MACROS) + +#undef INT8_C +#define INT8_C(c) __INT8_C(c) +#undef INT16_C +#define INT16_C(c) __INT16_C(c) +#undef INT32_C +#define INT32_C(c) __INT32_C(c) +#undef INT64_C +#define INT64_C(c) __INT64_C(c) +#undef UINT8_C +#define UINT8_C(c) __UINT8_C(c) +#undef UINT16_C +#define UINT16_C(c) __UINT16_C(c) +#undef UINT32_C +#define UINT32_C(c) __UINT32_C(c) +#undef UINT64_C +#define UINT64_C(c) __UINT64_C(c) +#undef INTMAX_C +#define INTMAX_C(c) __INTMAX_C(c) +#undef UINTMAX_C +#define UINTMAX_C(c) __UINTMAX_C(c) + +#endif /* (!defined __cplusplus || __cplusplus >= 201103L + || defined __STDC_CONSTANT_MACROS) */ + +#ifdef __STDC_WANT_IEC_60559_BFP_EXT__ +/* TS 18661-1 widths of integer types. 
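+   Each *_WIDTH macro expands to the width in bits of the corresponding
+   type.  The whole block is guarded by __STDC_WANT_IEC_60559_BFP_EXT__,
+   so an application must define that macro before including this header
+   to obtain these names.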
*/ + +#ifdef __INT8_TYPE__ +# undef INT8_WIDTH +# define INT8_WIDTH 8 +#endif +#ifdef __UINT8_TYPE__ +# undef UINT8_WIDTH +# define UINT8_WIDTH 8 +#endif +#ifdef __INT16_TYPE__ +# undef INT16_WIDTH +# define INT16_WIDTH 16 +#endif +#ifdef __UINT16_TYPE__ +# undef UINT16_WIDTH +# define UINT16_WIDTH 16 +#endif +#ifdef __INT32_TYPE__ +# undef INT32_WIDTH +# define INT32_WIDTH 32 +#endif +#ifdef __UINT32_TYPE__ +# undef UINT32_WIDTH +# define UINT32_WIDTH 32 +#endif +#ifdef __INT64_TYPE__ +# undef INT64_WIDTH +# define INT64_WIDTH 64 +#endif +#ifdef __UINT64_TYPE__ +# undef UINT64_WIDTH +# define UINT64_WIDTH 64 +#endif + +#undef INT_LEAST8_WIDTH +#define INT_LEAST8_WIDTH __INT_LEAST8_WIDTH__ +#undef UINT_LEAST8_WIDTH +#define UINT_LEAST8_WIDTH __INT_LEAST8_WIDTH__ +#undef INT_LEAST16_WIDTH +#define INT_LEAST16_WIDTH __INT_LEAST16_WIDTH__ +#undef UINT_LEAST16_WIDTH +#define UINT_LEAST16_WIDTH __INT_LEAST16_WIDTH__ +#undef INT_LEAST32_WIDTH +#define INT_LEAST32_WIDTH __INT_LEAST32_WIDTH__ +#undef UINT_LEAST32_WIDTH +#define UINT_LEAST32_WIDTH __INT_LEAST32_WIDTH__ +#undef INT_LEAST64_WIDTH +#define INT_LEAST64_WIDTH __INT_LEAST64_WIDTH__ +#undef UINT_LEAST64_WIDTH +#define UINT_LEAST64_WIDTH __INT_LEAST64_WIDTH__ + +#undef INT_FAST8_WIDTH +#define INT_FAST8_WIDTH __INT_FAST8_WIDTH__ +#undef UINT_FAST8_WIDTH +#define UINT_FAST8_WIDTH __INT_FAST8_WIDTH__ +#undef INT_FAST16_WIDTH +#define INT_FAST16_WIDTH __INT_FAST16_WIDTH__ +#undef UINT_FAST16_WIDTH +#define UINT_FAST16_WIDTH __INT_FAST16_WIDTH__ +#undef INT_FAST32_WIDTH +#define INT_FAST32_WIDTH __INT_FAST32_WIDTH__ +#undef UINT_FAST32_WIDTH +#define UINT_FAST32_WIDTH __INT_FAST32_WIDTH__ +#undef INT_FAST64_WIDTH +#define INT_FAST64_WIDTH __INT_FAST64_WIDTH__ +#undef UINT_FAST64_WIDTH +#define UINT_FAST64_WIDTH __INT_FAST64_WIDTH__ + +#ifdef __INTPTR_TYPE__ +# undef INTPTR_WIDTH +# define INTPTR_WIDTH __INTPTR_WIDTH__ +#endif +#ifdef __UINTPTR_TYPE__ +# undef UINTPTR_WIDTH +# define UINTPTR_WIDTH __INTPTR_WIDTH__ +#endif + +#undef INTMAX_WIDTH +#define INTMAX_WIDTH __INTMAX_WIDTH__ +#undef UINTMAX_WIDTH +#define UINTMAX_WIDTH __INTMAX_WIDTH__ + +#undef PTRDIFF_WIDTH +#define PTRDIFF_WIDTH __PTRDIFF_WIDTH__ + +#undef SIG_ATOMIC_WIDTH +#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__ + +#undef SIZE_WIDTH +#define SIZE_WIDTH __SIZE_WIDTH__ + +#undef WCHAR_WIDTH +#define WCHAR_WIDTH __WCHAR_WIDTH__ + +#undef WINT_WIDTH +#define WINT_WIDTH __WINT_WIDTH__ + +#endif + +#endif /* _GCC_STDINT_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdint.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdint.h new file mode 100644 index 0000000..83b6f70 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdint.h @@ -0,0 +1,14 @@ +#ifndef _GCC_WRAP_STDINT_H +#if __STDC_HOSTED__ +# if defined __cplusplus && __cplusplus >= 201103L +# undef __STDC_LIMIT_MACROS +# define __STDC_LIMIT_MACROS +# undef __STDC_CONSTANT_MACROS +# define __STDC_CONSTANT_MACROS +# endif +# include_next +#else +# include "stdint-gcc.h" +#endif +#define _GCC_WRAP_STDINT_H +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/stdnoreturn.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdnoreturn.h new file mode 100644 index 0000000..739c2e3 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/stdnoreturn.h @@ -0,0 +1,35 @@ +/* Copyright (C) 2011-2017 Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +<http://www.gnu.org/licenses/>. */ + +/* ISO C1X: 7.23 _Noreturn <stdnoreturn.h>. */ + +#ifndef _STDNORETURN_H +#define _STDNORETURN_H + +#ifndef __cplusplus + +#define noreturn _Noreturn + +#endif + +#endif /* stdnoreturn.h */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/unwind-arm-common.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/unwind-arm-common.h new file mode 100644 index 0000000..abf50ba --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/unwind-arm-common.h @@ -0,0 +1,250 @@ +/* Header file for the ARM EABI and C6X unwinders + Copyright (C) 2003-2017 Free Software Foundation, Inc. + Contributed by Paul Brook + + This file is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 3, or (at your option) any + later version. + + This file is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Language-independent unwinder header public defines. This contains both + ABI defined objects, and GNU support routines.
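[The stdnoreturn.h shim above does exactly one thing: in C it maps the convenience spelling noreturn onto the C11 _Noreturn keyword, and in C++ it defines nothing so the identifier stays free. A minimal usage sketch, illustrative only; the function name fatal is hypothetical:

#include <stdnoreturn.h>
#include <stdlib.h>

/* 'noreturn' expands to _Noreturn, telling the compiler this
   function never returns to its caller.  */
noreturn void fatal (int code)
{
  exit (code);
}
]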
*/ + +#ifndef UNWIND_ARM_COMMON_H +#define UNWIND_ARM_COMMON_H + +#define __ARM_EABI_UNWINDER__ 1 + +#ifdef __cplusplus +extern "C" { +#endif + typedef unsigned _Unwind_Word __attribute__((__mode__(__word__))); + typedef signed _Unwind_Sword __attribute__((__mode__(__word__))); + typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__))); + typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__))); + typedef _Unwind_Word _uw; + typedef unsigned _uw64 __attribute__((mode(__DI__))); + typedef unsigned _uw16 __attribute__((mode(__HI__))); + typedef unsigned _uw8 __attribute__((mode(__QI__))); + + typedef enum + { + _URC_OK = 0, /* operation completed successfully */ + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8, + _URC_FAILURE = 9 /* unspecified failure of some kind */ + } + _Unwind_Reason_Code; + + typedef enum + { + _US_VIRTUAL_UNWIND_FRAME = 0, + _US_UNWIND_FRAME_STARTING = 1, + _US_UNWIND_FRAME_RESUME = 2, + _US_ACTION_MASK = 3, + _US_FORCE_UNWIND = 8, + _US_END_OF_STACK = 16 + } + _Unwind_State; + + /* Provided only for compatibility with existing code. */ + typedef int _Unwind_Action; +#define _UA_SEARCH_PHASE 1 +#define _UA_CLEANUP_PHASE 2 +#define _UA_HANDLER_FRAME 4 +#define _UA_FORCE_UNWIND 8 +#define _UA_END_OF_STACK 16 +#define _URC_NO_REASON _URC_OK + + typedef struct _Unwind_Control_Block _Unwind_Control_Block; + typedef struct _Unwind_Context _Unwind_Context; + typedef _uw _Unwind_EHT_Header; + + + /* UCB: */ + + struct _Unwind_Control_Block + { + char exception_class[8]; + void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *); + /* Unwinder cache, private fields for the unwinder's use */ + struct + { + _uw reserved1; /* Forced unwind stop fn, 0 if not forced */ + _uw reserved2; /* Personality routine address */ + _uw reserved3; /* Saved callsite address */ + _uw reserved4; /* Forced unwind stop arg */ + _uw reserved5; + } + unwinder_cache; + /* Propagation barrier cache (valid after phase 1): */ + struct + { + _uw sp; + _uw bitpattern[5]; + } + barrier_cache; + /* Cleanup cache (preserved over cleanup): */ + struct + { + _uw bitpattern[4]; + } + cleanup_cache; + /* Pr cache (for pr's benefit): */ + struct + { + _uw fnstart; /* function start address */ + _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */ + _uw additional; /* additional data */ + _uw reserved1; + } + pr_cache; + long long int :0; /* Force alignment to 8-byte boundary */ + }; + + /* Virtual Register Set*/ + + typedef enum + { + _UVRSC_CORE = 0, /* integer register */ + _UVRSC_VFP = 1, /* vfp */ + _UVRSC_FPA = 2, /* fpa */ + _UVRSC_WMMXD = 3, /* Intel WMMX data register */ + _UVRSC_WMMXC = 4 /* Intel WMMX control register */ + } + _Unwind_VRS_RegClass; + + typedef enum + { + _UVRSD_UINT32 = 0, + _UVRSD_VFPX = 1, + _UVRSD_FPAX = 2, + _UVRSD_UINT64 = 3, + _UVRSD_FLOAT = 4, + _UVRSD_DOUBLE = 5 + } + _Unwind_VRS_DataRepresentation; + + typedef enum + { + _UVRSR_OK = 0, + _UVRSR_NOT_IMPLEMENTED = 1, + _UVRSR_FAILED = 2 + } + _Unwind_VRS_Result; + + /* Frame unwinding state. */ + typedef struct + { + /* The current word (bytes packed msb first). */ + _uw data; + /* Pointer to the next word of data. */ + _uw *next; + /* The number of bytes left in this word. */ + _uw8 bytes_left; + /* The number of words pointed to by ptr. 
*/ + _uw8 words_left; + } + __gnu_unwind_state; + + typedef _Unwind_Reason_Code (*personality_routine) (_Unwind_State, + _Unwind_Control_Block *, _Unwind_Context *); + + _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *, _Unwind_VRS_RegClass, + _uw, _Unwind_VRS_DataRepresentation, + void *); + + _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *, _Unwind_VRS_RegClass, + _uw, _Unwind_VRS_DataRepresentation, + void *); + + _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *, _Unwind_VRS_RegClass, + _uw, _Unwind_VRS_DataRepresentation); + + + /* Support functions for the PR. */ +#define _Unwind_Exception _Unwind_Control_Block + typedef char _Unwind_Exception_Class[8]; + + void * _Unwind_GetLanguageSpecificData (_Unwind_Context *); + _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *); + + _Unwind_Ptr _Unwind_GetDataRelBase (_Unwind_Context *); + /* This should never be used. */ + _Unwind_Ptr _Unwind_GetTextRelBase (_Unwind_Context *); + + /* Interface functions: */ + _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp); + void __attribute__((noreturn)) _Unwind_Resume(_Unwind_Control_Block *ucbp); + _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (_Unwind_Control_Block *ucbp); + + typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + _Unwind_Control_Block *, struct _Unwind_Context *, void *); + _Unwind_Reason_Code _Unwind_ForcedUnwind (_Unwind_Control_Block *, + _Unwind_Stop_Fn, void *); + /* @@@ Use unwind data to perform a stack backtrace. The trace callback + is called for every stack frame in the call chain, but no cleanup + actions are performed. */ + typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (_Unwind_Context *, void *); + _Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, + void*); + + _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *); + void _Unwind_Complete(_Unwind_Control_Block *ucbp); + void _Unwind_DeleteException (_Unwind_Exception *); + + _Unwind_Reason_Code __gnu_unwind_frame (_Unwind_Control_Block *, + _Unwind_Context *); + _Unwind_Reason_Code __gnu_unwind_execute (_Unwind_Context *, + __gnu_unwind_state *); + + static inline _Unwind_Word + _Unwind_GetGR (_Unwind_Context *context, int regno) + { + _uw val; + _Unwind_VRS_Get (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val); + return val; + } + +#define _Unwind_GetIPInfo(context, ip_before_insn) \ + (*ip_before_insn = 0, _Unwind_GetIP (context)) + + static inline void + _Unwind_SetGR (_Unwind_Context *context, int regno, _Unwind_Word val) + { + _Unwind_VRS_Set (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val); + } + + _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *); + void * _Unwind_GetLanguageSpecificData (_Unwind_Context *); + +/* leb128 type numbers have a potentially unlimited size. + The target of the following definitions of _sleb128_t and _uleb128_t + is to have efficient data types large enough to hold the leb128 type + numbers used in the unwind code. */ +typedef long _sleb128_t; +typedef unsigned long _uleb128_t; + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* defined UNWIND_ARM_COMMON_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/unwind.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/unwind.h new file mode 100644 index 0000000..8181cb6 --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/unwind.h @@ -0,0 +1,93 @@ +/* Header file for the ARM EABI unwinder + Copyright (C) 2003-2017 Free Software Foundation, Inc. 
+ Contributed by Paul Brook + + This file is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 3, or (at your option) any + later version. + + This file is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Language-independent unwinder header public defines. This contains both + ABI defined objects, and GNU support routines. */ + +#ifndef UNWIND_ARM_H +#define UNWIND_ARM_H + +#include "unwind-arm-common.h" + +#define UNWIND_STACK_REG 13 +/* Use IP as a scratch register within the personality routine. */ +#define UNWIND_POINTER_REG 12 + +#ifdef __cplusplus +extern "C" { +#endif + /* Decode an R_ARM_TARGET2 relocation. */ + static inline _Unwind_Word + _Unwind_decode_typeinfo_ptr (_Unwind_Word base __attribute__ ((unused)), + _Unwind_Word ptr) + { + _Unwind_Word tmp; + + tmp = *(_Unwind_Word *) ptr; + /* Zero values are always NULL. */ + if (!tmp) + return 0; + +#if (defined(linux) && !defined(__uClinux__)) || defined(__NetBSD__) \ + || defined(__FreeBSD__) + /* Pc-relative indirect. */ +#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel | DW_EH_PE_indirect) + tmp += ptr; + tmp = *(_Unwind_Word *) tmp; +#elif defined(__symbian__) || defined(__uClinux__) +#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_absptr) + /* Absolute pointer. Nothing more to do. */ +#else +#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel) + /* Pc-relative pointer. */ + tmp += ptr; +#endif + return tmp; + } + + static inline _Unwind_Reason_Code + __gnu_unwind_24bit (_Unwind_Context * context __attribute__ ((unused)), + _uw data __attribute__ ((unused)), + int compact __attribute__ ((unused))) + { + return _URC_FAILURE; + } +#ifndef __FreeBSD__ + /* Return the address of the instruction, not the actual IP value. */ +#define _Unwind_GetIP(context) \ + (_Unwind_GetGR (context, 15) & ~(_Unwind_Word)1) + +#define _Unwind_SetIP(context, val) \ + _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1)) +#else + #undef _Unwind_GetIPInfo + _Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *); + _Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *); + void _Unwind_SetIP (struct _Unwind_Context *, _Unwind_Ptr); +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* defined UNWIND_ARM_H */ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/include/varargs.h b/lib/gcc/arm-linux-androideabi/7.2.0/include/varargs.h new file mode 100644 index 0000000..4b9803e --- /dev/null +++ b/lib/gcc/arm-linux-androideabi/7.2.0/include/varargs.h @@ -0,0 +1,7 @@ +#ifndef _VARARGS_H +#define _VARARGS_H + +#error "GCC no longer implements <varargs.h>." +#error "Revise your code to use <stdarg.h>."
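[Taken together, the unwind-arm-common.h declarations and the _Unwind_GetIP definition above are enough to walk the stack outside of exception handling. The sketch below is illustrative, not part of this import: it assumes code compiled by this toolchain with unwind tables present (e.g. -funwind-tables), and trace_fn/print_backtrace are hypothetical names:

#include <unwind.h>
#include <stdio.h>

/* Called by the unwinder once per stack frame; returning
   _URC_NO_REASON (an alias for _URC_OK above) continues the walk.  */
static _Unwind_Reason_Code
trace_fn (_Unwind_Context *ctx, void *arg)
{
  int *depth = (int *) arg;
  /* On ARM EABI, _Unwind_GetIP reads core register 15 (the PC) and
     masks off the Thumb bit, per the macro defined above.  */
  printf ("#%d: 0x%lx\n", (*depth)++,
          (unsigned long) _Unwind_GetIP (ctx));
  return _URC_NO_REASON;
}

void print_backtrace (void)
{
  int depth = 0;
  _Unwind_Backtrace (trace_fn, &depth);
}
]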
+ +#endif diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/libgcc.a b/lib/gcc/arm-linux-androideabi/7.2.0/libgcc.a new file mode 100644 index 0000000..f276c05 Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/libgcc.a differ diff --git a/lib/gcc/arm-linux-androideabi/7.2.0/libgcov.a b/lib/gcc/arm-linux-androideabi/7.2.0/libgcov.a new file mode 100644 index 0000000..ee110cf Binary files /dev/null and b/lib/gcc/arm-linux-androideabi/7.2.0/libgcov.a differ diff --git a/lib64/libcc1.so b/lib64/libcc1.so new file mode 120000 index 0000000..5200aa3 --- /dev/null +++ b/lib64/libcc1.so @@ -0,0 +1 @@ +libcc1.so.0.0.0 \ No newline at end of file diff --git a/lib64/libcc1.so.0 b/lib64/libcc1.so.0 new file mode 120000 index 0000000..5200aa3 --- /dev/null +++ b/lib64/libcc1.so.0 @@ -0,0 +1 @@ +libcc1.so.0.0.0 \ No newline at end of file diff --git a/lib64/libcc1.so.0.0.0 b/lib64/libcc1.so.0.0.0 new file mode 100755 index 0000000..fe2e749 Binary files /dev/null and b/lib64/libcc1.so.0.0.0 differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/cc1 b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1 new file mode 100755 index 0000000..786805e Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1 differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/cc1obj b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1obj new file mode 100755 index 0000000..2f60ffe Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1obj differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/cc1objplus b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1objplus new file mode 100755 index 0000000..e1d888e Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1objplus differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/cc1plus b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1plus new file mode 100755 index 0000000..c271711 Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/cc1plus differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/collect2 b/libexec/gcc/arm-linux-androideabi/7.2.0/collect2 new file mode 100755 index 0000000..20062b6 Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/collect2 differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so b/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so new file mode 120000 index 0000000..f25ba88 --- /dev/null +++ b/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so @@ -0,0 +1 @@ +liblto_plugin.so.0.0.0 \ No newline at end of file diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0 b/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0 new file mode 120000 index 0000000..f25ba88 --- /dev/null +++ b/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0 @@ -0,0 +1 @@ +liblto_plugin.so.0.0.0 \ No newline at end of file diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0.0.0 b/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0.0.0 new file mode 100755 index 0000000..368b348 Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/liblto_plugin.so.0.0.0 differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/lto-wrapper b/libexec/gcc/arm-linux-androideabi/7.2.0/lto-wrapper new file mode 100755 index 0000000..79e3494 Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/lto-wrapper differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/lto1 b/libexec/gcc/arm-linux-androideabi/7.2.0/lto1 new file mode 100755 index 0000000..4e8dad8 Binary files /dev/null and 
b/libexec/gcc/arm-linux-androideabi/7.2.0/lto1 differ diff --git a/libexec/gcc/arm-linux-androideabi/7.2.0/plugin/gengtype b/libexec/gcc/arm-linux-androideabi/7.2.0/plugin/gengtype new file mode 100755 index 0000000..6f3a138 Binary files /dev/null and b/libexec/gcc/arm-linux-androideabi/7.2.0/plugin/gengtype differ diff --git a/repo.prop b/repo.prop new file mode 100644 index 0000000..cc03fd5 --- /dev/null +++ b/repo.prop @@ -0,0 +1,57 @@ +platform/bionic 96c577c48284c376ec065f2b3a2d2987a7eeff5c +platform/development 81be584491c364bbbb3c2339db4374febf354d3c +platform/external/googletest dca68eca3f6328ed43ac70b61fa7bbc5a8c1ce1d +platform/external/libcxx d58cd97288ebe8605d56da477476d6ad014a8150 +platform/external/libcxxabi f9dea50271ccad986065bdaf92e45519efd3a68f +platform/external/libunwind_llvm fecc6f26cfdbfc9cf0ea2021629ac6e85b7c0113 +platform/external/llvm 839c1277f49a1e5c5fa633e96cadc170803ae828 +platform/external/shaderc/glslang a3ee5fea2e0b76d8ea44f81f81cc92dcccda4156 +platform/external/shaderc/shaderc 48feee09e007b52120cfbbcf968ead1e5152c262 +platform/external/shaderc/spirv-headers f34a049cd936c2879749509b4e56344eac8a0660 +platform/external/shaderc/spirv-tools cc5949657c690d30031f84216ae944c864020f1c +platform/external/vulkan-validation-layers 727ecf10b5c219bf14c82bd1e58d856861f0cb86 +platform/ndk c27531463b55059150b21040aa66d98a6a535103 +platform/prebuilts/clang/host/darwin-x86 ce827a504ccce0f032943202fbb5c1ee8fba8857 +platform/prebuilts/clang/host/linux-x86 c3a963bdbc2dc17bf88eced72f02c6b1f879220c +platform/prebuilts/clang/host/windows-x86 696baf341dce8cc784a09988ca3b2c961f31ea55 +platform/prebuilts/cmake/darwin-x86 289589930105f9db21313bfd5aaa4a5fe02509e5 +platform/prebuilts/cmake/linux-x86 ee96b2ec399702e23faee15863fed3ae33144fdd +platform/prebuilts/gcc/darwin-x86/aarch64/aarch64-linux-android-4.9 7c946f823b985a797c63983aa3b3bd1ec2dceab3 +platform/prebuilts/gcc/darwin-x86/arm/arm-eabi-4.8 6d08ca9f45ff685648fd13c75bf5cac4b11c19bb +platform/prebuilts/gcc/darwin-x86/arm/arm-linux-androideabi-4.9 6895d0a62cfb78005df68310c61a7fa5cd20d685 +platform/prebuilts/gcc/darwin-x86/host/headers 4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68 +platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1 ec5aa66aaa4964c27564d0ec84dc1f18a2d72b7e +platform/prebuilts/gcc/darwin-x86/mips/mips64el-linux-android-4.9 d942564c9331f07026a8f78502d8571206eb7be4 +platform/prebuilts/gcc/darwin-x86/x86/x86_64-linux-android-4.9 9b83502b78b50217b04e66b34a3e017683093cb3 +platform/prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 b06651d136ffbab2098a2152abff4a14b69632bb +platform/prebuilts/gcc/linux-x86/arm/arm-eabi-4.8 26e93f6af47f7bd3a9beb5c102a5f45e19bfa38a +platform/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 13f048825a14abafc9cb290f840c276564806390 +platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.11-4.8 a461e1b3161b4817b743ddbfb58a3878e7985c8b +platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 6e91bb7b816df8543cffe14287acbf0412f65550 +platform/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8 e76a9a506d7ad132f107eb2f7c27b6a8ccb68b91 +platform/prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9 388fdc4995d374d76a0c4b292afabac91638e134 +platform/prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 694b42b76aaf30420c2f7764dc56ed928537428e +platform/prebuilts/ndk 05d528cda2f5bb43855277c0dc9ed5e0beb9378e +platform/prebuilts/ninja/darwin-x86 00f798346dedb4a7a6a6dcc9ad32ff09d66ee0db +platform/prebuilts/ninja/linux-x86 6369b19fc3fbe765636af75d394627e2b92599ed 
+platform/prebuilts/python/darwin-x86/2.7.5 0c5958b1636c47ed7c284f859c8e805fd06a0e63 +platform/prebuilts/python/linux-x86/2.7.5 621d405f7e4db1d7046dde53c0c6e5add3e2743e +platform/prebuilts/renderscript/host/darwin-x86 a0ede5664b4741348c0b6c8d5da06d483dcf2876 +platform/prebuilts/renderscript/host/linux-x86 68a0a1ddacb81c97d718f46ad464a3851d0b67af +platform/prebuilts/renderscript/host/windows-x86 5df9f20565e63906167c82f6120c78e969b3b467 +platform/prebuilts/simpleperf e41e920370b6444549c67c5e85bbaa52cc4a5223 +toolchain/binutils 5318987751bf4e440bf3900416b36648bd41ff74 +toolchain/build 58be6006bb71abb97d7cdff7be3e73d55bbc22b8 +toolchain/cloog 604793eab97d360aef729f064674569ee6dbf3e1 +toolchain/expat 40172a0ae9d40a068f1e1a48ffcf6a1ccf765ed5 +toolchain/gcc 0c5a656a1322e137fa4a251f2ccc6c4022918c0a +toolchain/gdb a231f8846122acd245f790e5ce63501699fff4db +toolchain/gmp b2acd5dbf47868ac5b5bc844e16d2cadcbd4c810 +toolchain/isl 0ccf95726af8ce58ad61ff474addbce3a31ba99c +toolchain/mpc 835d16e92eed875638a8b5d552034c3b1aae045b +toolchain/mpfr de979fc377db766591e7feaf052f0de59be46e76 +toolchain/ppl 979062d362bc5a1c00804237b408b19b4618fb24 +toolchain/python 6a7fc9bfd21da85dda97a8bcd2952e0bfbded424 +toolchain/sed 45df23d6dc8b51ea5cd903d023c10fd7d72415b9 +toolchain/xz a0eb1f5763e7b4a3daf4fd7d1ac9504058cc1082 +toolchain/yasm a159fe073809b4138cf90b7298ea31ea17af85c0 diff --git a/toolchain.mk b/toolchain.mk new file mode 100644 index 0000000..8bf81fb --- /dev/null +++ b/toolchain.mk @@ -0,0 +1,17 @@ +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +LOCAL_PATH := $(call my-dir) + +include $(call all-makefiles-under, $(LOCAL_PATH)) -- cgit v1.2.3
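[A closing note on the varargs.h stub imported above: it intentionally fails the build, since GCC only supports the ISO C variadic interface. A minimal sketch of the replacement it directs code toward, using only standard <stdarg.h> facilities; sum_ints is a hypothetical name:

#include <stdarg.h>

/* Variadic sum over 'count' int arguments, written against
   <stdarg.h> as the varargs.h stub instructs.  */
static int sum_ints (int count, ...)
{
  va_list ap;
  int total = 0;
  va_start (ap, count);
  for (int i = 0; i < count; i++)
    total += va_arg (ap, int);
  va_end (ap);
  return total;
}

/* Usage: sum_ints (3, 1, 2, 3) evaluates to 6.  */
]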