author     Ben Cheng <bccheng@google.com>  2012-11-15 23:17:11 -0800
committer  Ben Cheng <bccheng@google.com>  2012-11-15 23:17:11 -0800
commit     0f9a15542ac5db44484c9342f136026088d871e5 (patch)
tree       cd98aa14dc75cdd5a3a659c43ff67972d53840ed
parent     1ce662fe112deb5a69f4279eb4feb5aa5eff5511 (diff)
download   arm-linux-androideabi-4.7-0f9a15542ac5db44484c9342f136026088d871e5.tar.gz
New toolchain build based on gcc 4.7 / binutils 2.22 for darwin-x86.
Change-Id: Iacff8b3f2046914ac036cf0536b9ae145885b56b
-rw-r--r--  Android.mk | 16
-rw-r--r--  SOURCES | 50
-rwxr-xr-x  arm-linux-androideabi/bin/ar | bin 0 -> 963280 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/as | bin 0 -> 1641336 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/c++ | bin 0 -> 689168 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/g++ | bin 0 -> 689168 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/gcc | bin 0 -> 685072 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/ld | bin 0 -> 5129344 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/ld.bfd | bin 0 -> 1476296 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/ld.gold | bin 0 -> 5129344 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/nm | bin 0 -> 940872 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/objcopy | bin 0 -> 1164312 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/objdump | bin 0 -> 1403992 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/ranlib | bin 0 -> 963304 bytes
-rwxr-xr-x  arm-linux-androideabi/bin/strip | bin 0 -> 1164312 bytes
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x | 244
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn | 241
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc | 246
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd | 243
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc | 246
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw | 246
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn | 243
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr | 163
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs | 232
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc | 232
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw | 231
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu | 164
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw | 246
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x | 244
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn | 241
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc | 246
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd | 243
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc | 246
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw | 246
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn | 243
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr | 163
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs | 232
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc | 232
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw | 231
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu | 164
-rw-r--r--  arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw | 246
-rwxr-xr-x  bin/arm-linux-androideabi-addr2line | bin 0 -> 928584 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-ar | bin 0 -> 963280 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-as | bin 0 -> 1641336 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-c++ | bin 0 -> 689168 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-c++filt | bin 0 -> 924376 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-cpp | bin 0 -> 689168 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-elfedit | bin 0 -> 40200 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-g++ | bin 0 -> 689168 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gcc | bin 0 -> 685072 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gcc-4.7 | bin 0 -> 685072 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gcc-ar | bin 0 -> 31856 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gcc-nm | bin 0 -> 31856 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gcc-ranlib | bin 0 -> 31856 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gcov | bin 0 -> 294352 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gdb | bin 0 -> 4938840 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gdbtui | bin 0 -> 4938840 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-gprof | bin 0 -> 1012352 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-ld | bin 0 -> 5129344 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-ld.bfd | bin 0 -> 1476296 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-ld.gold | bin 0 -> 5129344 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-nm | bin 0 -> 940872 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-objcopy | bin 0 -> 1164312 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-objdump | bin 0 -> 1403992 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-ranlib | bin 0 -> 963304 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-readelf | bin 0 -> 455848 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-run | bin 0 -> 1188552 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-size | bin 0 -> 928784 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-strings | bin 0 -> 928640 bytes
-rwxr-xr-x  bin/arm-linux-androideabi-strip | bin 0 -> 1164312 bytes
-rw-r--r--  include/ansidecl.h | 434
-rw-r--r--  include/bfd.h | 6461
-rw-r--r--  include/bfdlink.h | 819
-rw-r--r--  include/symcat.h | 55
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbegin.o | bin 0 -> 2688 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginS.o | bin 0 -> 2896 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginT.o | bin 0 -> 2688 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtend.o | bin 0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtendS.o | bin 0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcc.a | bin 0 -> 5709278 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcov.a | bin 0 -> 121704 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbegin.o | bin 0 -> 2528 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginS.o | bin 0 -> 2724 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginT.o | bin 0 -> 2528 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtend.o | bin 0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtendS.o | bin 0 -> 1069 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcc.a | bin 0 -> 5697398 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcov.a | bin 0 -> 120284 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/crtbegin.o | bin 0 -> 2684 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/crtbeginS.o | bin 0 -> 2892 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/crtbeginT.o | bin 0 -> 2684 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/crtend.o | bin 0 -> 1065 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/crtendS.o | bin 0 -> 1065 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include-fixed/README | 14
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include-fixed/limits.h | 172
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include-fixed/linux/a.out.h | 229
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include-fixed/stdio.h | 462
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include-fixed/sys/types.h | 140
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include-fixed/syslimits.h | 8
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/arm_neon.h | 12176
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/float.h | 278
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/iso646.h | 45
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/mmintrin.h | 1254
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stdalign.h | 39
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stdarg.h | 130
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stdbool.h | 50
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stddef.h | 433
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stdfix.h | 204
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stdint-gcc.h | 259
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stdint.h | 8
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/stdnoreturn.h | 35
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/unwind-arm-common.h | 251
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/unwind.h | 83
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/include/varargs.h | 7
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/libgcc.a | bin 0 -> 5691654 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/libgcov.a | bin 0 -> 121436 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/thumb/crtbegin.o | bin 0 -> 2540 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginS.o | bin 0 -> 2736 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginT.o | bin 0 -> 2540 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/thumb/crtend.o | bin 0 -> 1065 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/thumb/crtendS.o | bin 0 -> 1065 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/thumb/libgcc.a | bin 0 -> 5716654 bytes
-rw-r--r--  lib/gcc/arm-linux-androideabi/4.7/thumb/libgcov.a | bin 0 -> 120604 bytes
-rw-r--r--  lib/libarm-linux-android-sim.a | bin 0 -> 417248 bytes
-rw-r--r--  lib/x86_64/libiberty.a | bin 0 -> 422144 bytes
-rw-r--r--  lib32/Android.mk | 24
-rw-r--r--  lib32/libbfd.a | bin 0 -> 909248 bytes
-rwxr-xr-x  lib32/libbfd.la | 41
-rw-r--r--  lib32/libiberty.a | bin 0 -> 389000 bytes
-rw-r--r--  lib32/libintl.a | bin 0 -> 63096 bytes
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/cc1 | bin 0 -> 15326288 bytes
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/cc1plus | bin 0 -> 16606976 bytes
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/collect2 | bin 0 -> 412176 bytes
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.0.so | bin 0 -> 93592 bytes
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.la | 41
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.so | bin 0 -> 93592 bytes
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/lto-wrapper | bin 0 -> 550176 bytes
-rwxr-xr-x  libexec/gcc/arm-linux-androideabi/4.7/lto1 | bin 0 -> 15949064 bytes
-rw-r--r--  toolchain.mk | 17
139 files changed, 30189 insertions, 0 deletions
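
Note that every binutils tool in this listing is installed twice: once under bin/ with the arm-linux-androideabi- prefix and once under arm-linux-androideabi/bin/ with its bare name, the usual GNU cross-toolchain layout (the matching sizes, e.g. 685072 bytes for both gcc entries, indicate identical binaries). A quick check from the top of the unpacked prebuilt:

$ ./bin/arm-linux-androideabi-gcc --version
$ ./arm-linux-androideabi/bin/gcc --version    # same tool, triplet-local install name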
diff --git a/Android.mk b/Android.mk
new file mode 100644
index 0000000..77f84b5
--- /dev/null
+++ b/Android.mk
@@ -0,0 +1,16 @@
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Empty makefile here prevents Android.mk in subdirectories from being
+# unconditionally included.
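
For context on the comment above: the platform build of this era discovered module makefiles with a leaf search that stopped at the first Android.mk on each path, so this stub shadows any makefile deeper in the tree (lib32/Android.mk, added below, is presumably meant to be included explicitly instead). A rough sketch of that discovery step, assuming the build/tools/findleaves.py helper from the 2012 platform tree; the flags shown are illustrative:

$ # stops descending a branch once an Android.mk is found, so the stub wins
$ python build/tools/findleaves.py --prune=.repo --prune=.git . Android.mk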
diff --git a/SOURCES b/SOURCES
new file mode 100644
index 0000000..dcc38ba
--- /dev/null
+++ b/SOURCES
@@ -0,0 +1,50 @@
+Using built-in specs.
+COLLECT_GCC=/Volumes/android/android-ndk-4.7/toolchains/arm-linux-androideabi-4.7/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-gcc
+COLLECT_LTO_WRAPPER=/Volumes/android/android-ndk-4.7/toolchains/arm-linux-androideabi-4.7/prebuilt/darwin-x86_64/bin/../libexec/gcc/arm-linux-androideabi/4.7/lto-wrapper
+Target: arm-linux-androideabi
+Configured with: /Volumes/android/AOSP-toolchain/build/../gcc/gcc-4.7/configure --prefix=/Volumes/android/toolchain-build-4.7/prefix --target=arm-linux-androideabi --host=x86_64-apple-darwin --build=x86_64-apple-darwin --with-gnu-as --with-gnu-ld --enable-languages=c,c++ --with-gmp=/Volumes/android/toolchain-build-4.7/temp-install --with-mpfr=/Volumes/android/toolchain-build-4.7/temp-install --with-mpc=/Volumes/android/toolchain-build-4.7/temp-install --without-ppl --without-cloog --disable-libssp --enable-threads --disable-nls --disable-libmudflap --disable-libgomp --disable-libstdc__-v3 --disable-sjlj-exceptions --disable-shared --disable-tls --disable-libitm --with-float=soft --with-fpu=vfp --with-arch=armv5te --enable-target-optspace --enable-initfini-array --disable-nls --prefix=/Volumes/android/toolchain-build-4.7/prefix --with-sysroot=/Volumes/android/toolchain-build-4.7/prefix/sysroot --with-binutils-version=2.22 --with-mpfr-version=2.4.1 --with-mpc-version=0.8.1 --with-gmp-version=5.0.5 --with-gcc-version=4.7 --with-gdb-version=7.3.x --with-gxx-include-dir=/Volumes/android/toolchain-build-4.7/prefix/include/c++/4.7 --disable-bootstrap --disable-libquadmath --disable-plugin --enable-gold --with-arch=armv5te --program-transform-name='s&^&arm-linux-androideabi-&' --enable-gold=default
+Thread model: posix
+gcc version 4.7 (GCC)
+
+gcc:
+commit 3c56f697e112c79fb5457538fdc373e348beca24
+Author: Andrew Hsieh <andrewhsieh@google.com>
+
+ Initial checkin of unmodified gcc-5666.3.tar.gz
+
+binutils:
+commit dc51c7a5e0e2620dc85e6c9c46027dce94a85e1f
+Author: Pavel Chupin <pavel.v.chupin@intel.com>
+
+ Fix gold issue on fsck_msdos build when objects built by ICC
+
+gdb:
+commit ab1098c4c1c197b6508d86989b4e60d48e0fd512
+Author: Ryan V. Bissell <rbissell@nvidia.com>
+
+ bionic support for library-list-svr4.dtd
+
+gmp:
+commit e6b9669dafc6a5f83c80b4b4176359b78bccdc90
+Author: David 'Digit' Turner <digit@google.com>
+
+ Add gmp-5.0.5.tar.bz2
+
+mpfr:
+commit bde731396c38c8d5b0bca800aace6a690d0d2978
+Author: Jing Yu <jingyu@google.com>
+
+ add mpfr-2.4.2.tar.bz2
+
+build:
+commit 106603b925b7f20177c5b7613535d9d43ed35b7d
+Author: Andrew Hsieh <andrewhsieh@google.com>
+
+ Options brought in from core combo for IA
+
+expat:
+commit 40172a0ae9d40a068f1e1a48ffcf6a1ccf765ed5
+Author: Jing Yu <jingyu@google.com>
+
+ expat package for building gdb-7.3
+
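The header block of SOURCES, through the "gcc version" line, is the captured output of the shipped compiler's own version query, so it can be regenerated from the unpacked prebuilt (relative path shown for illustration):

$ ./bin/arm-linux-androideabi-gcc -v 2>&1 | head -n 7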
diff --git a/arm-linux-androideabi/bin/ar b/arm-linux-androideabi/bin/ar
new file mode 100755
index 0000000..ac5c433
--- /dev/null
+++ b/arm-linux-androideabi/bin/ar
Binary files differ
diff --git a/arm-linux-androideabi/bin/as b/arm-linux-androideabi/bin/as
new file mode 100755
index 0000000..0c70905
--- /dev/null
+++ b/arm-linux-androideabi/bin/as
Binary files differ
diff --git a/arm-linux-androideabi/bin/c++ b/arm-linux-androideabi/bin/c++
new file mode 100755
index 0000000..708dcaa
--- /dev/null
+++ b/arm-linux-androideabi/bin/c++
Binary files differ
diff --git a/arm-linux-androideabi/bin/g++ b/arm-linux-androideabi/bin/g++
new file mode 100755
index 0000000..1a5bd6a
--- /dev/null
+++ b/arm-linux-androideabi/bin/g++
Binary files differ
diff --git a/arm-linux-androideabi/bin/gcc b/arm-linux-androideabi/bin/gcc
new file mode 100755
index 0000000..259f26d
--- /dev/null
+++ b/arm-linux-androideabi/bin/gcc
Binary files differ
diff --git a/arm-linux-androideabi/bin/ld b/arm-linux-androideabi/bin/ld
new file mode 100755
index 0000000..a528ef0
--- /dev/null
+++ b/arm-linux-androideabi/bin/ld
Binary files differ
diff --git a/arm-linux-androideabi/bin/ld.bfd b/arm-linux-androideabi/bin/ld.bfd
new file mode 100755
index 0000000..8b10b46
--- /dev/null
+++ b/arm-linux-androideabi/bin/ld.bfd
Binary files differ
diff --git a/arm-linux-androideabi/bin/ld.gold b/arm-linux-androideabi/bin/ld.gold
new file mode 100755
index 0000000..a528ef0
--- /dev/null
+++ b/arm-linux-androideabi/bin/ld.gold
Binary files differ
diff --git a/arm-linux-androideabi/bin/nm b/arm-linux-androideabi/bin/nm
new file mode 100755
index 0000000..8aa03fc
--- /dev/null
+++ b/arm-linux-androideabi/bin/nm
Binary files differ
diff --git a/arm-linux-androideabi/bin/objcopy b/arm-linux-androideabi/bin/objcopy
new file mode 100755
index 0000000..92e7192
--- /dev/null
+++ b/arm-linux-androideabi/bin/objcopy
Binary files differ
diff --git a/arm-linux-androideabi/bin/objdump b/arm-linux-androideabi/bin/objdump
new file mode 100755
index 0000000..6674ebd
--- /dev/null
+++ b/arm-linux-androideabi/bin/objdump
Binary files differ
diff --git a/arm-linux-androideabi/bin/ranlib b/arm-linux-androideabi/bin/ranlib
new file mode 100755
index 0000000..2e787b5
--- /dev/null
+++ b/arm-linux-androideabi/bin/ranlib
Binary files differ
diff --git a/arm-linux-androideabi/bin/strip b/arm-linux-androideabi/bin/strip
new file mode 100755
index 0000000..3e40042
--- /dev/null
+++ b/arm-linux-androideabi/bin/strip
Binary files differ
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x
new file mode 100644
index 0000000..62ac22f
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x
@@ -0,0 +1,244 @@
+/* Default linker script, for normal executables */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
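
Per its own comments, this is the stock binutils ELF script with two Android modifications: the text base is pinned at 0x00008000 and the data segment is page-aligned "to make life easier for apriori" (Android's old prelink tool). The linker normally uses its built-in copy of this script; --verbose dumps that copy for comparison, and -T forces the on-disk file instead. A sketch, assuming the prebuilt's bin/ is on PATH and a main.o is at hand:

$ arm-linux-androideabi-ld --verbose > builtin.lds    # dump the built-in default script (plus a version banner)
$ arm-linux-androideabi-gcc -nostdlib main.o -o main -Wl,-T,arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x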
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn
new file mode 100644
index 0000000..26881b4
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn
@@ -0,0 +1,241 @@
+/* Script for -N: mix text and data on same page; don't align data */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = .;
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
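
The .xbn variant is what ld substitutes when -N (--omagic) is given: text and data may share a page and the data segment is not realigned, which is why the DATA_SEGMENT_ALIGN, DATA_SEGMENT_RELRO_END and DATA_SEGMENT_END machinery of the .x script above degenerates to bare ". = .;" assignments here. For example:

$ arm-linux-androideabi-gcc -nostdlib -Wl,-N main.o -o main    # linked with armelf_linux_eabi.xbn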
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc
new file mode 100644
index 0000000..e7e0411
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc
@@ -0,0 +1,246 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
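
The .xc variant serves -z combreloc: instead of one output section per relocation flavor (.rel.init, .rel.text and so on, as in the .x script), dynamic relocations are combined into a single sorted .rel.dyn so the runtime linker can binary-search them. combreloc has long been GNU ld's default, so in practice this is the script most dynamically linked output gets. To request it explicitly and confirm the merge with the readelf shipped in this same prebuilt:

$ arm-linux-androideabi-gcc -shared -Wl,-z,combreloc foo.o -o libfoo.so
$ arm-linux-androideabi-readelf -S libfoo.so | grep '\.rel'    # expect a single .rel.dyn (plus .rel.plt)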
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd
new file mode 100644
index 0000000..2ea0834
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd
@@ -0,0 +1,243 @@
+/* Script for ld -pie: link position independent executable */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
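
The .xd variant backs ld -pie; against the .x script the substantive change is the link base, with __executable_start provided at 0 rather than 0x00008000, since a position-independent executable is relocated to its load address at run time. Typical use:

$ arm-linux-androideabi-gcc -fPIE -pie main.c -o main    # linked with armelf_linux_eabi.xd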
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc
new file mode 100644
index 0000000..98e5302
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc
@@ -0,0 +1,246 @@
+/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
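
The __init_array_start/__init_array_end pair PROVIDE_HIDDEN'd in the script above is consumed by the C runtime, which walks the span between the two symbols before main(). A minimal sketch of a program that references them, assuming it is linked through a script like this one (illustration only, not code from this commit):

    #include <stdio.h>

    typedef void (*init_fn)(void);

    extern init_fn __init_array_start[];  /* PROVIDE_HIDDEN in the script */
    extern init_fn __init_array_end[];

    int main(void)
    {
        /* The C runtime calls each entry in this span before main();
           here we only report how many constructors the KEEP(...)
           rules above collected into .init_array. */
        printf("%ld init_array entries\n",
               (long)(__init_array_end - __init_array_start));
        return 0;
    }

PROVIDE_HIDDEN keeps the two symbols out of the dynamic symbol table, which is why startup code inside the same link can still see them while other modules cannot.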
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw
new file mode 100644
index 0000000..50d4cce
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw
@@ -0,0 +1,246 @@
+/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
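
The etext, edata, and end markers in these scripts are only PROVIDEd, meaning they are defined exactly when an input object references them without defining them itself. A hedged sketch that makes the script materialize all of them (nothing below comes from the commit):

    #include <stdio.h>

    /* Classic layout markers PROVIDEd by the scripts above. */
    extern char __executable_start[], etext[], edata[], end[];

    int main(void)
    {
        printf("text: %p .. %p\n", (void *)__executable_start, (void *)etext);
        printf("initialized data ends at %p\n", (void *)edata);
        printf("bss ends / heap starts at %p\n", (void *)end);
        return 0;
    }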
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn
new file mode 100644
index 0000000..247182f
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn
@@ -0,0 +1,243 @@
+/* Script for -n: mix text and data on same page */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr
new file mode 100644
index 0000000..8f2e34d
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr
@@ -0,0 +1,163 @@
+/* Script for ld -r: link without relocation */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero VMAs works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0 : { *(.gnu.version_d) }
+ .gnu.version_r 0 : { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(.init))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(.fini))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
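
This -r script backs partial linking: every output section is placed at VMA 0 and relocations are carried through into the output object. A hedged sketch of the usual invocation (file names are hypothetical, not from this commit):

    /* a.c -- one of two inputs for a relocatable (ld -r) link.
       Typical usage, assuming a matching b.c:
         arm-linux-androideabi-gcc -c a.c b.c
         arm-linux-androideabi-ld -r a.o b.o -o ab.o
       ab.o stays ET_REL: its sections sit at address 0, relocations
       are preserved, and it can be fed to a later, final link. */
    int a_value = 42;
    int a_func(void) { return a_value; }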
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs
new file mode 100644
index 0000000..f3e170f
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs
@@ -0,0 +1,232 @@
+/* Script for ld --shared: link shared library */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
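
Unlike the executable scripts, this --shared script starts the image at 0 + SIZEOF_HEADERS and emits no .interp or __executable_start; the dynamic linker rebases the whole object at load time. A minimal sketch of a DSO built through it (hypothetical names, not from this commit):

    /* hello.c -- minimal input for the --shared script above.
       Typical usage:
         arm-linux-androideabi-gcc -fPIC -c hello.c
         arm-linux-androideabi-gcc -shared hello.o -o libhello.so
       The DSO is laid out from base 0 and relocated by the dynamic
       linker when it is loaded. */
    int hello(void) { return 1; }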
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc
new file mode 100644
index 0000000..9f72b81
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc
@@ -0,0 +1,232 @@
+/* Script for --shared -z combreloc: shared library, combine & sort relocs */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
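
The .tdata/.tbss output sections in these scripts collect the initial thread-local-storage image, from which the static linker sizes the PT_TLS segment. A small sketch of what lands in each section (ordinary C, not from the commit):

    #include <stdio.h>

    __thread int counter = 5;  /* initialized TLS -> .tdata          */
    __thread int scratch;      /* zero-initialized TLS -> .tbss      */

    int main(void)
    {
        scratch = counter + 1;
        printf("%d %d\n", counter, scratch);
        return 0;
    }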
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw
new file mode 100644
index 0000000..bc06374
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw
@@ -0,0 +1,231 @@
+/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
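
Note that in the -z now -z relro variants (.xsw here, .xdw earlier) .got is laid out before DATA_SEGMENT_RELRO_END, so the eagerly-bound GOT falls inside the region the loader remaps read-only after relocation; in the plain variants it sits just after. Separately, the __bss_start__/__bss_end__ pair these scripts define supports startup code that needs the BSS bounds; a small sketch that just measures the span (hosted loaders zero-fill BSS themselves, so bare-metal-style crt0 code is the real consumer):

    #include <stdio.h>

    extern char __bss_start__[], __bss_end__[];

    int main(void)
    {
        /* A bare-metal crt0 would clear [__bss_start__, __bss_end__)
           itself; here we only report how large the span is. */
        printf("bss spans %ld bytes\n", (long)(__bss_end__ - __bss_start__));
        return 0;
    }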
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu
new file mode 100644
index 0000000..7a7a607
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu
@@ -0,0 +1,164 @@
+/* Script for ld -Ur: link w/out relocation, do create constructors */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero VMAs works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0 : { *(.gnu.version_d) }
+ .gnu.version_r 0 : { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(.init))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(.fini))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw
new file mode 100644
index 0000000..5bfb772
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw
@@ -0,0 +1,246 @@
+/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctors section from
+ the crtend.o file until after the sorted ctors.
+ The .ctors section from the crtend.o file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
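
Relative to the plain combreloc variant (.xc, later in this patch), the distinguishing move in this -z now -z relro script is where DATA_SEGMENT_RELRO_END lands: here .got, including .got.plt, precedes the marker, so the whole GOT falls inside the PT_GNU_RELRO segment and can be remapped read-only once startup relocation finishes. With lazy binding the marker must come first, because .got.plt is patched at first call and has to stay writable. Side by side, using the exact constructs from these scripts:

    /* -z now -z relro: GOT inside the RELRO region */
    .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
    . = DATA_SEGMENT_RELRO_END (0, .);

    /* lazy binding: .got.plt stays writable, so it follows the marker */
    . = DATA_SEGMENT_RELRO_END (0, .);
    .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }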
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x
new file mode 100644
index 0000000..078fb66
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x
@@ -0,0 +1,244 @@
+/* Default linker script, for normal executables */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctors section from
+ the crtend.o file until after the sorted ctors.
+ The .ctors section from the crtend.o file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
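
The armelfb_* scripts differ from their little-endian armelf_* counterparts essentially only in the OUTPUT_FORMAT header. The three-argument form encodes endian selection: the first BFD name is the default output, the second is chosen when ld is invoked with -EB, the third with -EL. For these big-endian scripts the default and -EB targets coincide:

    /*            default,        -EB,             -EL */
    OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
                  "elf32-littlearm")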
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn
new file mode 100644
index 0000000..5263635
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn
@@ -0,0 +1,241 @@
+/* Script for -N: mix text and data on same page; don't align data */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = .;
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctors section from
+ the crtend.o file until after the sorted ctors.
+ The .ctors section from the crtend.o file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
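
The -N script above is the default layout with the data-segment page alignment removed: where the other variants round the location counter up with DATA_SEGMENT_ALIGN, it substitutes a deliberate no-op, so text and data may share a page and the image is packed tight at the cost of demand paging. The two forms, as they appear in this patch:

    /* default scripts: start the data segment on a fresh page */
    . = ALIGN (CONSTANT (MAXPAGESIZE));
    . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));

    /* -N (.xbn): deliberate no-op; data follows text immediately */
    . = .;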
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc
new file mode 100644
index 0000000..e41cfcc
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc
@@ -0,0 +1,246 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctors section from
+ the crtend.o file until after the sorted ctors.
+ The .ctors section from the crtend.o file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
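
The -z combreloc script above reshapes only the dynamic relocation sections. Instead of emitting one output section per relocation class, everything is funneled into a single .rel.dyn, which lets the linker sort the entries so relocations against the same symbol sit together, sparing the dynamic loader repeated symbol lookups. Schematically, with patterns taken from these scripts:

    /* default: one output section per class */
    .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
    .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }

    /* -z combreloc: one combined, sortable section */
    .rel.dyn :
    {
      *(.rel.init)
      *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
      *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
      /* ...and the remaining .rel.* classes, as listed above */
    }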
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd
new file mode 100644
index 0000000..8bdb727
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd
@@ -0,0 +1,243 @@
+/* Script for ld -pie: link position independent executable */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctors section from
+ the crtend.o file until after the sorted ctors.
+ The .ctors section from the crtend.o file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
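
The only substantive change in this -pie script is the link-time base: __executable_start and the location counter begin at 0 instead of the ARM Linux convention of 0x00008000, since a position-independent executable is placed at its real address by the loader and only the relative layout fixed here matters. Contrasted, as in the headers of these scripts:

    /* fixed-address executable */
    PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;

    /* -pie: link at 0; the loader chooses the actual base */
    PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;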
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc
new file mode 100644
index 0000000..dd99db4
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc
@@ -0,0 +1,246 @@
+/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctors section from
+ the crtend.o file until after the sorted ctors.
+ The .ctors section from the crtend.o file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
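
Every variant brackets *(.rel.iplt) with hidden __rel_iplt_start/__rel_iplt_end markers. These exist for statically linked IFUNC support: with no dynamic linker present, startup code in the C library (not part of this patch) can walk that address range and apply the R_ARM_IRELATIVE relocations itself. The same statements as in the scripts, with comments added here:

    .rel.iplt :
    {
      PROVIDE_HIDDEN (__rel_iplt_start = .);  /* first IRELATIVE reloc */
      *(.rel.iplt)
      PROVIDE_HIDDEN (__rel_iplt_end = .);    /* one past the last */
    }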
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw
new file mode 100644
index 0000000..88ddf5d
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw
@@ -0,0 +1,246 @@
+/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctors section from
+ the crtend.o file until after the sorted ctors.
+ The .ctors section from the crtend.o file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn
new file mode 100644
index 0000000..bb7112a
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn
@@ -0,0 +1,243 @@
+/* Script for -n: mix text and data on same page */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
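
The .xn script above is the one ld installs for -n under the big-endian armelfb_linux_eabi emulation; per its header comment, -n lets text and data share a page. A representative invocation (the -m flag is spelled out for clarity; object and output names are illustrative, not from this commit):

    arm-linux-androideabi-ld -m armelfb_linux_eabi -n -o demo demo.o

Adding --verbose to that command makes ld print the linker script it actually selected, which can be checked against the text above.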
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr
new file mode 100644
index 0000000..1d8ca8f
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr
@@ -0,0 +1,163 @@
+/* Script for ld -r: link without relocation */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero vmas works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0 : { *(.gnu.version_d) }
+ .gnu.version_r 0 : { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(.init))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(.fini))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
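
This .xr script backs ld -r, which emits a relocatable object rather than a final executable; consistent with the Solaris note in its header, every output section is pinned at vma 0 and no relocations are applied. A sketch of a partial link (file names illustrative):

    arm-linux-androideabi-ld -m armelfb_linux_eabi -r -o combined.o a.o b.o

The combined object keeps its relocations and can be used as an input to a later, final link.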
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs
new file mode 100644
index 0000000..1beb9d1
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs
@@ -0,0 +1,232 @@
+/* Script for ld --shared: link shared library */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
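
The .xs script serves -shared links. Since a shared library is loaded at an arbitrary base address, layout starts at 0 + SIZEOF_HEADERS rather than the fixed 0x00008000 text base used by the executable scripts. A minimal shared-library link (names illustrative):

    arm-linux-androideabi-ld -m armelfb_linux_eabi -shared -o libdemo.so demo.o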
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc
new file mode 100644
index 0000000..d89ac5e
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc
@@ -0,0 +1,232 @@
+/* Script for --shared -z combreloc: shared library, combine & sort relocs */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
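
The .xsc variant is chosen when -z combreloc is added to a -shared link: rather than one output section per input relocation section, dynamic relocations are combined into single .rel.dyn/.rela.dyn sections and sorted, which lets the dynamic linker resolve them faster. Illustratively:

    arm-linux-androideabi-ld -m armelfb_linux_eabi -shared -z combreloc -o libdemo.so demo.o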
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw
new file mode 100644
index 0000000..0569631
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw
@@ -0,0 +1,231 @@
+/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
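
Relative to .xsc, this .xsw script additionally covers -z now -z relro; note that .got (including .got.plt) is placed before DATA_SEGMENT_RELRO_END here, so with immediate binding the entire GOT lands in the region that is made read-only once startup relocation finishes. An illustrative link:

    arm-linux-androideabi-ld -m armelfb_linux_eabi -shared -z combreloc -z now -z relro -o libdemo.so demo.o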
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu
new file mode 100644
index 0000000..0c09568
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu
@@ -0,0 +1,164 @@
+/* Script for ld -Ur: link without relocation, do create constructors */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero vmas works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0 : { *(.gnu.version_d) }
+ .gnu.version_r 0 : { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(.init))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(.fini))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
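
The .xu script serves ld -Ur, which behaves like -r except that constructor tables are built: unlike the .xr script above, its .data output section includes SORT(CONSTRUCTORS). Illustratively:

    arm-linux-androideabi-ld -m armelfb_linux_eabi -Ur -o combined.o a.o b.o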
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw
new file mode 100644
index 0000000..ebb050b
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw
@@ -0,0 +1,246 @@
+/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. We want to align at exactly
+ a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array))
+ KEEP (*(EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end-of-ctors marker, and it must be last. */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
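
Finally, .xw is the executable counterpart of .xsw: -z combreloc -z now -z relro without -shared, so it keeps the fixed 0x00008000 text base and the __executable_start symbol while merging relocations into .rel.dyn/.rela.dyn and placing .got ahead of DATA_SEGMENT_RELRO_END. Illustratively:

    arm-linux-androideabi-ld -m armelfb_linux_eabi -z combreloc -z now -z relro -o demo demo.o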
diff --git a/bin/arm-linux-androideabi-addr2line b/bin/arm-linux-androideabi-addr2line
new file mode 100755
index 0000000..b750460
--- /dev/null
+++ b/bin/arm-linux-androideabi-addr2line
Binary files differ
diff --git a/bin/arm-linux-androideabi-ar b/bin/arm-linux-androideabi-ar
new file mode 100755
index 0000000..ac5c433
--- /dev/null
+++ b/bin/arm-linux-androideabi-ar
Binary files differ
diff --git a/bin/arm-linux-androideabi-as b/bin/arm-linux-androideabi-as
new file mode 100755
index 0000000..28dc497
--- /dev/null
+++ b/bin/arm-linux-androideabi-as
Binary files differ
diff --git a/bin/arm-linux-androideabi-c++ b/bin/arm-linux-androideabi-c++
new file mode 100755
index 0000000..1a5bd6a
--- /dev/null
+++ b/bin/arm-linux-androideabi-c++
Binary files differ
diff --git a/bin/arm-linux-androideabi-c++filt b/bin/arm-linux-androideabi-c++filt
new file mode 100755
index 0000000..5b0a26f
--- /dev/null
+++ b/bin/arm-linux-androideabi-c++filt
Binary files differ
diff --git a/bin/arm-linux-androideabi-cpp b/bin/arm-linux-androideabi-cpp
new file mode 100755
index 0000000..965808d
--- /dev/null
+++ b/bin/arm-linux-androideabi-cpp
Binary files differ
diff --git a/bin/arm-linux-androideabi-elfedit b/bin/arm-linux-androideabi-elfedit
new file mode 100755
index 0000000..7e00d22
--- /dev/null
+++ b/bin/arm-linux-androideabi-elfedit
Binary files differ
diff --git a/bin/arm-linux-androideabi-g++ b/bin/arm-linux-androideabi-g++
new file mode 100755
index 0000000..ccc887f
--- /dev/null
+++ b/bin/arm-linux-androideabi-g++
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc b/bin/arm-linux-androideabi-gcc
new file mode 100755
index 0000000..7023e8c
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-4.7 b/bin/arm-linux-androideabi-gcc-4.7
new file mode 100755
index 0000000..156ca93
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-4.7
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-ar b/bin/arm-linux-androideabi-gcc-ar
new file mode 100755
index 0000000..e95176d
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-ar
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-nm b/bin/arm-linux-androideabi-gcc-nm
new file mode 100755
index 0000000..21211c9
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-nm
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-ranlib b/bin/arm-linux-androideabi-gcc-ranlib
new file mode 100755
index 0000000..ef0e2d6
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-ranlib
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcov b/bin/arm-linux-androideabi-gcov
new file mode 100755
index 0000000..6278a1b
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcov
Binary files differ
diff --git a/bin/arm-linux-androideabi-gdb b/bin/arm-linux-androideabi-gdb
new file mode 100755
index 0000000..e92c627
--- /dev/null
+++ b/bin/arm-linux-androideabi-gdb
Binary files differ
diff --git a/bin/arm-linux-androideabi-gdbtui b/bin/arm-linux-androideabi-gdbtui
new file mode 100755
index 0000000..e59491f
--- /dev/null
+++ b/bin/arm-linux-androideabi-gdbtui
Binary files differ
diff --git a/bin/arm-linux-androideabi-gprof b/bin/arm-linux-androideabi-gprof
new file mode 100755
index 0000000..bbd1591
--- /dev/null
+++ b/bin/arm-linux-androideabi-gprof
Binary files differ
diff --git a/bin/arm-linux-androideabi-ld b/bin/arm-linux-androideabi-ld
new file mode 100755
index 0000000..a528ef0
--- /dev/null
+++ b/bin/arm-linux-androideabi-ld
Binary files differ
diff --git a/bin/arm-linux-androideabi-ld.bfd b/bin/arm-linux-androideabi-ld.bfd
new file mode 100755
index 0000000..11e39c3
--- /dev/null
+++ b/bin/arm-linux-androideabi-ld.bfd
Binary files differ
diff --git a/bin/arm-linux-androideabi-ld.gold b/bin/arm-linux-androideabi-ld.gold
new file mode 100755
index 0000000..a528ef0
--- /dev/null
+++ b/bin/arm-linux-androideabi-ld.gold
Binary files differ
diff --git a/bin/arm-linux-androideabi-nm b/bin/arm-linux-androideabi-nm
new file mode 100755
index 0000000..bbf2fa7
--- /dev/null
+++ b/bin/arm-linux-androideabi-nm
Binary files differ
diff --git a/bin/arm-linux-androideabi-objcopy b/bin/arm-linux-androideabi-objcopy
new file mode 100755
index 0000000..44cd3a2
--- /dev/null
+++ b/bin/arm-linux-androideabi-objcopy
Binary files differ
diff --git a/bin/arm-linux-androideabi-objdump b/bin/arm-linux-androideabi-objdump
new file mode 100755
index 0000000..6674ebd
--- /dev/null
+++ b/bin/arm-linux-androideabi-objdump
Binary files differ
diff --git a/bin/arm-linux-androideabi-ranlib b/bin/arm-linux-androideabi-ranlib
new file mode 100755
index 0000000..2e787b5
--- /dev/null
+++ b/bin/arm-linux-androideabi-ranlib
Binary files differ
diff --git a/bin/arm-linux-androideabi-readelf b/bin/arm-linux-androideabi-readelf
new file mode 100755
index 0000000..f7c4d8d
--- /dev/null
+++ b/bin/arm-linux-androideabi-readelf
Binary files differ
diff --git a/bin/arm-linux-androideabi-run b/bin/arm-linux-androideabi-run
new file mode 100755
index 0000000..1336232
--- /dev/null
+++ b/bin/arm-linux-androideabi-run
Binary files differ
diff --git a/bin/arm-linux-androideabi-size b/bin/arm-linux-androideabi-size
new file mode 100755
index 0000000..b8c8a11
--- /dev/null
+++ b/bin/arm-linux-androideabi-size
Binary files differ
diff --git a/bin/arm-linux-androideabi-strings b/bin/arm-linux-androideabi-strings
new file mode 100755
index 0000000..52295a4
--- /dev/null
+++ b/bin/arm-linux-androideabi-strings
Binary files differ
diff --git a/bin/arm-linux-androideabi-strip b/bin/arm-linux-androideabi-strip
new file mode 100755
index 0000000..3e40042
--- /dev/null
+++ b/bin/arm-linux-androideabi-strip
Binary files differ
diff --git a/include/ansidecl.h b/include/ansidecl.h
new file mode 100644
index 0000000..23d85bf
--- /dev/null
+++ b/include/ansidecl.h
@@ -0,0 +1,434 @@
+/* ANSI and traditional C compatibility macros
+ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+ 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* ANSI and traditional C compatibility macros
+
+ ANSI C is assumed if __STDC__ is #defined.
+
+   Macro              ANSI C definition     Traditional C definition
+   -----              ---- - ----------     ----------- - ----------
+   ANSI_PROTOTYPES    1                     not defined
+   PTR                `void *'              `char *'
+   PTRCONST           `void *const'         `char *'
+   LONG_DOUBLE        `long double'         `double'
+   const              not defined           `'
+   volatile           not defined           `'
+   signed             not defined           `'
+   VA_START(ap, var)  va_start(ap, var)     va_start(ap)
+
+ Note that it is safe to write "void foo();" indicating a function
+ with no return value, in all K+R compilers we have been able to test.
+
+ For declaring functions with prototypes, we also provide these:
+
+ PARAMS ((prototype))
+ -- for functions which take a fixed number of arguments. Use this
+ when declaring the function. When defining the function, write a
+ K+R style argument list. For example:
+
+       char *strcpy PARAMS ((char *dest, char *source));
+       ...
+       char *
+       strcpy (dest, source)
+            char *dest;
+            char *source;
+       { ... }
+
+
+ VPARAMS ((prototype, ...))
+ -- for functions which take a variable number of arguments. Use
+ PARAMS to declare the function, VPARAMS to define it. For example:
+
+       int printf PARAMS ((const char *format, ...));
+       ...
+       int
+       printf VPARAMS ((const char *format, ...))
+       {
+         ...
+       }
+
+ For writing functions which take variable numbers of arguments, we
+ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These
+ hide the differences between K+R <varargs.h> and C89 <stdarg.h> more
+ thoroughly than the simple VA_START() macro mentioned above.
+
+ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end.
+ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls
+ corresponding to the list of fixed arguments. Then use va_arg
+ normally to get the variable arguments, or pass your va_list object
+ around. You do not declare the va_list yourself; VA_OPEN does it
+ for you.
+
+ Here is a complete example:
+
+       int
+       printf VPARAMS ((const char *format, ...))
+       {
+          int result;
+
+          VA_OPEN (ap, format);
+          VA_FIXEDARG (ap, const char *, format);
+
+          result = vfprintf (stdout, format, ap);
+          VA_CLOSE (ap);
+
+          return result;
+       }
+
+
+ You can declare variables either before or after the VA_OPEN,
+ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning
+ and end of a block. They must appear at the same nesting level,
+ and any variables declared after VA_OPEN go out of scope at
+ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the
+ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE
+ pairs in a single function in case you need to traverse the
+ argument list more than once.
+
+ For ease of writing code which uses GCC extensions but needs to be
+ portable to other compilers, we provide the GCC_VERSION macro that
+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various
+ wrappers around __attribute__. Also, __extension__ will be #defined
+ to nothing if it doesn't work. See below.
+
+ This header also defines a lot of obsolete macros:
+ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID,
+ AND, DOTS, NOARGS. Don't use them. */
+
+#ifndef _ANSIDECL_H
+#define _ANSIDECL_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Every source file includes this file,
+ so they will all get the switch for lint. */
+/* LINTLIBRARY */
+
+/* Using MACRO(x,y) in cpp #if conditionals does not work with some
+ older preprocessors. Thus we can't define something like this:
+
+#define HAVE_GCC_VERSION(MAJOR, MINOR) \
+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR)))
+
+and then test "#if HAVE_GCC_VERSION(2,7)".
+
+So instead we use the macro below and test it against specific values. */
+
+/* This macro simplifies testing whether we are using gcc, and if it
+ is of a particular minimum version. (Both major & minor numbers are
+ significant.) This macro will evaluate to 0 if we are not using
+ gcc at all. */
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#endif /* GCC_VERSION */
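+
+/* Example (illustrative): a test against a specific minimum version
+   is then written as
+
+       #if GCC_VERSION >= 3004
+       ... use a feature introduced in gcc 3.4 ...
+       #endif
+
+   since gcc 3.4 encodes as 3 * 1000 + 4 = 3004. */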
+
+#if defined (__STDC__) || defined(__cplusplus) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32)
+/* All known AIX compilers implement these things (but don't always
+ define __STDC__). The RISC/OS MIPS compiler defines these things
+ in SVR4 mode, but does not define __STDC__. */
+/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other
+ C++ compilers, does not define __STDC__, though it acts as if this
+ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */
+
+#define ANSI_PROTOTYPES 1
+#define PTR void *
+#define PTRCONST void *const
+#define LONG_DOUBLE long double
+
+/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in
+ a #ifndef. */
+#ifndef PARAMS
+#define PARAMS(ARGS) ARGS
+#endif
+
+#define VPARAMS(ARGS) ARGS
+#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR)
+
+/* variadic function helper macros */
+/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's
+ use without inhibiting further decls and without declaring an
+ actual variable. */
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, T, N) struct Qdmy
+
+#undef const
+#undef volatile
+#undef signed
+
+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports
+ it too, but it's not in C89. */
+#undef inline
+#if __STDC_VERSION__ >= 199901L || defined(__cplusplus) || (defined(__SUNPRO_C) && defined(__C99FEATURES__))
+/* it's a keyword */
+#else
+# if GCC_VERSION >= 2007
+# define inline __inline__ /* __inline__ prevents -pedantic warnings */
+# else
+# define inline /* nothing */
+# endif
+#endif
+
+/* These are obsolete. Do not use. */
+#ifndef IN_GCC
+#define CONST const
+#define VOLATILE volatile
+#define SIGNED signed
+
+#define PROTO(type, name, arglist) type name arglist
+#define EXFUN(name, proto) name proto
+#define DEFUN(name, arglist, args) name(args)
+#define DEFUN_VOID(name) name(void)
+#define AND ,
+#define DOTS , ...
+#define NOARGS void
+#endif /* ! IN_GCC */
+
+#else /* Not ANSI C. */
+
+#undef ANSI_PROTOTYPES
+#define PTR char *
+#define PTRCONST PTR
+#define LONG_DOUBLE double
+
+#define PARAMS(args) ()
+#define VPARAMS(args) (va_alist) va_dcl
+#define VA_START(va_list, var) va_start(va_list)
+
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE)
+
+/* some systems define these in header files for non-ansi mode */
+#undef const
+#undef volatile
+#undef signed
+#undef inline
+#define const
+#define volatile
+#define signed
+#define inline
+
+#ifndef IN_GCC
+#define CONST
+#define VOLATILE
+#define SIGNED
+
+#define PROTO(type, name, arglist) type name ()
+#define EXFUN(name, proto) name()
+#define DEFUN(name, arglist, args) name arglist args;
+#define DEFUN_VOID(name) name()
+#define AND ;
+#define DOTS
+#define NOARGS
+#endif /* ! IN_GCC */
+
+#endif /* ANSI C. */
+
+/* Define macros for some gcc attributes. This permits us to use the
+ macros freely, and know that they will come into play for the
+ version of gcc in which they are supported. */
+
+#if (GCC_VERSION < 2007)
+# define __attribute__(x)
+#endif
+
+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */
+#ifndef ATTRIBUTE_MALLOC
+# if (GCC_VERSION >= 2096)
+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
+# else
+# define ATTRIBUTE_MALLOC
+# endif /* GNUC >= 2.96 */
+#endif /* ATTRIBUTE_MALLOC */
+
+/* Attributes on labels were valid as of gcc 2.93 and g++ 4.5. For
+ g++ an attribute on a label must be followed by a semicolon. */
+#ifndef ATTRIBUTE_UNUSED_LABEL
+# ifndef __cplusplus
+# if GCC_VERSION >= 2093
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif
+# else
+# if GCC_VERSION >= 4005
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED ;
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif
+# endif
+#endif
+
+#ifndef ATTRIBUTE_UNUSED
+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif /* ATTRIBUTE_UNUSED */
+
+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the
+ identifier name. */
+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004)
+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED
+#else /* !__cplusplus || GNUC >= 3.4 */
+# define ARG_UNUSED(NAME) NAME
+#endif /* !__cplusplus || GNUC >= 3.4 */
+
+#ifndef ATTRIBUTE_NORETURN
+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
+#endif /* ATTRIBUTE_NORETURN */
+
+/* Attribute `nonnull' was valid as of gcc 3.3. */
+#ifndef ATTRIBUTE_NONNULL
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m)))
+# else
+# define ATTRIBUTE_NONNULL(m)
+# endif /* GNUC >= 3.3 */
+#endif /* ATTRIBUTE_NONNULL */
+
+/* Attribute `pure' was valid as of gcc 3.0. */
+#ifndef ATTRIBUTE_PURE
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_PURE __attribute__ ((__pure__))
+# else
+# define ATTRIBUTE_PURE
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_PURE */
+
+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL.
+ This was the case for the `printf' format attribute by itself
+ before GCC 3.3, but as of 3.3 we need to add the `nonnull'
+ attribute to retain this behavior. */
+#ifndef ATTRIBUTE_PRINTF
+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m)
+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
+#endif /* ATTRIBUTE_PRINTF */
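+
+/* Example (illustrative; `fatal' is a hypothetical reporting function,
+   not one declared here):
+
+       extern void fatal (const char *, ...)
+         ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
+
+   asks gcc to check each call's arguments against the format string in
+   argument 1, with the variable arguments starting at argument 2, and
+   to assume the function never returns. */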
+
+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on
+ a function pointer. Format attributes were allowed on function
+ pointers as of gcc 3.1. */
+#ifndef ATTRIBUTE_FPTR_PRINTF
+# if (GCC_VERSION >= 3001)
+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n)
+# else
+# define ATTRIBUTE_FPTR_PRINTF(m, n)
+# endif /* GNUC >= 3.1 */
+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2)
+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3)
+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4)
+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5)
+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6)
+#endif /* ATTRIBUTE_FPTR_PRINTF */
+
+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A
+ NULL format specifier was allowed as of gcc 3.3. */
+#ifndef ATTRIBUTE_NULL_PRINTF
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n)))
+# else
+# define ATTRIBUTE_NULL_PRINTF(m, n)
+# endif /* GNUC >= 3.3 */
+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2)
+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3)
+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4)
+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5)
+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6)
+#endif /* ATTRIBUTE_NULL_PRINTF */
+
+/* Attribute `sentinel' was valid as of gcc 3.5. */
+#ifndef ATTRIBUTE_SENTINEL
+# if (GCC_VERSION >= 3005)
+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__))
+# else
+# define ATTRIBUTE_SENTINEL
+# endif /* GNUC >= 3.5 */
+#endif /* ATTRIBUTE_SENTINEL */
+
+
+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m))))
+# else
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m)
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */
+
+/* Useful for structures whose layout must match some binary specification
+ regardless of the alignment and padding qualities of the compiler. */
+#ifndef ATTRIBUTE_PACKED
+# define ATTRIBUTE_PACKED __attribute__ ((packed))
+#endif
+
+/* Attributes `hot' and `cold' were valid as of gcc 4.3. */
+#ifndef ATTRIBUTE_COLD
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_COLD __attribute__ ((__cold__))
+# else
+# define ATTRIBUTE_COLD
+# endif /* GNUC >= 4.3 */
+#endif /* ATTRIBUTE_COLD */
+#ifndef ATTRIBUTE_HOT
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_HOT __attribute__ ((__hot__))
+# else
+# define ATTRIBUTE_HOT
+# endif /* GNUC >= 4.3 */
+#endif /* ATTRIBUTE_HOT */
+
+/* We use __extension__ in some places to suppress -pedantic warnings
+ about GCC extensions. This feature didn't work properly before
+ gcc 2.8. */
+#if GCC_VERSION < 2008
+#define __extension__
+#endif
+
+/* This is used to declare a const variable which should be visible
+ outside of the current compilation unit. Use it as
+ EXPORTED_CONST int i = 1;
+ This is because the semantics of const are different in C and C++.
+ "extern const" is permitted in C but it looks strange, and gcc
+ warns about it when -Wc++-compat is not used. */
+#ifdef __cplusplus
+#define EXPORTED_CONST extern const
+#else
+#define EXPORTED_CONST const
+#endif
+
+/* Be conservative and only use enum bitfields with C++ or GCC.
+ FIXME: provide a complete autoconf test for buggy enum bitfields. */
+
+#ifdef __cplusplus
+#define ENUM_BITFIELD(TYPE) enum TYPE
+#elif (GCC_VERSION > 2000)
+#define ENUM_BITFIELD(TYPE) __extension__ enum TYPE
+#else
+#define ENUM_BITFIELD(TYPE) unsigned int
+#endif
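+
+/* Example (illustrative; the enum tag `machine_mode' is hypothetical
+   here):
+
+       struct insn
+       {
+         ENUM_BITFIELD (machine_mode) mode : 8;
+       };
+
+   keeps the field's enum type under C++ or GCC and degrades to a
+   plain unsigned int bitfield elsewhere. */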
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ansidecl.h */
diff --git a/include/bfd.h b/include/bfd.h
new file mode 100644
index 0000000..1c5f96d
--- /dev/null
+++ b/include/bfd.h
@@ -0,0 +1,6461 @@
+/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically
+ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c",
+ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c",
+ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c",
+ "linker.c", "simple.c" and "compress.c".
+ Run "make headers" in your build bfd/ to regenerate. */
+
+/* Main header file for the bfd library -- portable access to object files.
+
+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+ 2012 Free Software Foundation, Inc.
+
+ Contributed by Cygnus Support.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __BFD_H_SEEN__
+#define __BFD_H_SEEN__
+
+/* PR 14072: Ensure that config.h is included first. */
+#if !defined PACKAGE && !defined PACKAGE_VERSION
+#error config.h must be included before this header
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ansidecl.h"
+#include "symcat.h"
+#include <sys/stat.h>
+
+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
+#ifndef SABER
+/* This hack is to avoid a problem with some strict ANSI C preprocessors.
+ The problem is, "32_" is not a valid preprocessing token, and we don't
+ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will
+ cause the inner CONCAT2 macros to be evaluated first, producing
+ still-valid pp-tokens. Then the final concatenation can be done. */
+#undef CONCAT4
+#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d))
+#endif
+#endif
+
+/* This is a utility macro to handle the situation where the code
+ wants to place a constant string into the code, followed by a
+ comma and then the length of the string. Doing this by hand
+ is error prone, so using this macro is safer. */
+#define STRING_COMMA_LEN(STR) (STR), (sizeof (STR) - 1)
+/* Unfortunately it is not possible to use the STRING_COMMA_LEN macro
+ to create the arguments to another macro, since the preprocessor
+ will mis-count the number of arguments to the outer macro (by not
+ evaluating STRING_COMMA_LEN and so missing the comma). This is a
+ problem for example when trying to use STRING_COMMA_LEN to build
+ the arguments to the strncmp() macro. Hence this alternative
+ definition of strncmp is provided here.
+
+ Note - these macros do NOT work if STR2 is not a constant string. */
+#define CONST_STRNEQ(STR1,STR2) (strncmp ((STR1), (STR2), sizeof (STR2) - 1) == 0)
+ /* strcpy() can have a similar problem, but since we know we are
+ copying a constant string, we can use memcpy which will be faster
+ since there is no need to check for a NUL byte inside STR. We
+ can also save time if we do not need to copy the terminating NUL. */
+#define LITMEMCPY(DEST,STR2) memcpy ((DEST), (STR2), sizeof (STR2) - 1)
+#define LITSTRCPY(DEST,STR2) memcpy ((DEST), (STR2), sizeof (STR2))
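+
+/* Example (illustrative): a prefix test on a section name reads
+
+       if (CONST_STRNEQ (sec->name, ".debug_"))
+         ...
+
+   which compares exactly sizeof (".debug_") - 1 == 7 bytes, so a
+   shorter name cannot be overrun. */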
+
+
+#define BFD_SUPPORTS_PLUGINS 0
+
+/* The word size used by BFD on the host. This may be 64 with a 32
+ bit target if the host is 64 bit, or if other 64 bit targets have
+ been selected with --enable-targets, or if --enable-64-bit-bfd. */
+#define BFD_ARCH_SIZE 32
+
+/* The word size of the default bfd target. */
+#define BFD_DEFAULT_TARGET_SIZE 32
+
+#define BFD_HOST_64BIT_LONG 0
+#define BFD_HOST_64BIT_LONG_LONG 1
+#if 1
+#define BFD_HOST_64_BIT long long
+#define BFD_HOST_U_64_BIT unsigned long long
+typedef BFD_HOST_64_BIT bfd_int64_t;
+typedef BFD_HOST_U_64_BIT bfd_uint64_t;
+#endif
+
+#if BFD_ARCH_SIZE >= 64
+#define BFD64
+#endif
+
+#ifndef INLINE
+#if __GNUC__ >= 2
+#define INLINE __inline__
+#else
+#define INLINE
+#endif
+#endif
+
+/* Declaring a type wide enough to hold a host long and a host pointer. */
+#define BFD_HOSTPTR_T unsigned long
+typedef BFD_HOSTPTR_T bfd_hostptr_t;
+
+/* Forward declaration. */
+typedef struct bfd bfd;
+
+/* Boolean type used in bfd. Too many systems define their own
+ versions of "boolean" for us to safely typedef a "boolean" of
+ our own. Using an enum for "bfd_boolean" has its own set of
+ problems, with strange looking casts required to avoid warnings
+ on some older compilers. Thus we just use an int.
+
+ General rule: Functions which are bfd_boolean return TRUE on
+ success and FALSE on failure (unless they're a predicate). */
+
+typedef int bfd_boolean;
+#undef FALSE
+#undef TRUE
+#define FALSE 0
+#define TRUE 1
+
+#ifdef BFD64
+
+#ifndef BFD_HOST_64_BIT
+ #error No 64 bit integer type available
+#endif /* ! defined (BFD_HOST_64_BIT) */
+
+typedef BFD_HOST_U_64_BIT bfd_vma;
+typedef BFD_HOST_64_BIT bfd_signed_vma;
+typedef BFD_HOST_U_64_BIT bfd_size_type;
+typedef BFD_HOST_U_64_BIT symvalue;
+
+#if BFD_HOST_64BIT_LONG
+#define BFD_VMA_FMT "l"
+#elif defined (__MSVCRT__)
+#define BFD_VMA_FMT "I64"
+#else
+#define BFD_VMA_FMT "ll"
+#endif
+
+#ifndef fprintf_vma
+#define sprintf_vma(s,x) sprintf (s, "%016" BFD_VMA_FMT "x", x)
+#define fprintf_vma(f,x) fprintf (f, "%016" BFD_VMA_FMT "x", x)
+#endif
+
+#else /* not BFD64 */
+
+/* Represent a target address. Also used as a generic unsigned type
+ which is guaranteed to be big enough to hold any arithmetic types
+ we need to deal with. */
+typedef unsigned long bfd_vma;
+
+/* A generic signed type which is guaranteed to be big enough to hold any
+ arithmetic types we need to deal with. Can be assumed to be compatible
+ with bfd_vma in the same way that signed and unsigned ints are compatible
+ (as parameters, in assignment, etc). */
+typedef long bfd_signed_vma;
+
+typedef unsigned long symvalue;
+typedef unsigned long bfd_size_type;
+
+/* Print a bfd_vma x on stream s. */
+#define BFD_VMA_FMT "l"
+#define fprintf_vma(s,x) fprintf (s, "%08" BFD_VMA_FMT "x", x)
+#define sprintf_vma(s,x) sprintf (s, "%08" BFD_VMA_FMT "x", x)
+
+#endif /* not BFD64 */
+
+#define HALF_BFD_SIZE_TYPE \
+ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2))
+
+#ifndef BFD_HOST_64_BIT
+/* Fall back on a 32 bit type. The idea is to make these types always
+ available for function return types, but in the case that
+ BFD_HOST_64_BIT is undefined such a function should abort or
+ otherwise signal an error. */
+typedef bfd_signed_vma bfd_int64_t;
+typedef bfd_vma bfd_uint64_t;
+#endif
+
+/* An offset into a file. BFD always uses the largest possible offset
+ based on the build time availability of fseek, fseeko, or fseeko64. */
+typedef BFD_HOST_64_BIT file_ptr;
+typedef unsigned BFD_HOST_64_BIT ufile_ptr;
+
+extern void bfd_sprintf_vma (bfd *, char *, bfd_vma);
+extern void bfd_fprintf_vma (bfd *, void *, bfd_vma);
+
+#define printf_vma(x) fprintf_vma(stdout,x)
+#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x)
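+
+/* Example (illustrative): printf_vma (sec->vma) prints the address
+   zero-padded to the full bfd_vma width in hex, e.g. "00008000" on a
+   32 bit configuration. */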
+
+typedef unsigned int flagword; /* 32 bits of flags */
+typedef unsigned char bfd_byte;
+
+/* File formats. */
+
+typedef enum bfd_format
+{
+ bfd_unknown = 0, /* File format is unknown. */
+ bfd_object, /* Linker/assembler/compiler output. */
+ bfd_archive, /* Object archive file. */
+ bfd_core, /* Core dump. */
+ bfd_type_end /* Marks the end; don't use it! */
+}
+bfd_format;
+
+/* Symbols and relocation. */
+
+/* A count of carsyms (canonical archive symbols). */
+typedef unsigned long symindex;
+
+/* How to perform a relocation. */
+typedef const struct reloc_howto_struct reloc_howto_type;
+
+#define BFD_NO_MORE_SYMBOLS ((symindex) ~0)
+
+/* General purpose part of a symbol X;
+ target specific parts are in libcoff.h, libaout.h, etc. */
+
+#define bfd_get_section(x) ((x)->section)
+#define bfd_get_output_section(x) ((x)->section->output_section)
+#define bfd_set_section(x,y) ((x)->section) = (y)
+#define bfd_asymbol_base(x) ((x)->section->vma)
+#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value)
+#define bfd_asymbol_name(x) ((x)->name)
+/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/
+#define bfd_asymbol_bfd(x) ((x)->the_bfd)
+#define bfd_asymbol_flavour(x) \
+ (((x)->flags & BSF_SYNTHETIC) != 0 \
+ ? bfd_target_unknown_flavour \
+ : bfd_asymbol_bfd (x)->xvec->flavour)
+
+/* A canonical archive symbol. */
+/* This is a type pun with struct ranlib on purpose! */
+typedef struct carsym
+{
+ char *name;
+ file_ptr file_offset; /* Look here to find the file. */
+}
+carsym; /* To make these you call a carsymogen. */
+
+/* Used in generating armaps (archive tables of contents).
+ Perhaps just a forward definition would do? */
+struct orl /* Output ranlib. */
+{
+ char **name; /* Symbol name. */
+ union
+ {
+ file_ptr pos;
+ bfd *abfd;
+ } u; /* bfd* or file position. */
+ int namidx; /* Index into string table. */
+};
+
+/* Linenumber stuff. */
+typedef struct lineno_cache_entry
+{
+ unsigned int line_number; /* Linenumber from start of function. */
+ union
+ {
+ struct bfd_symbol *sym; /* Function name. */
+ bfd_vma offset; /* Offset into section. */
+ } u;
+}
+alent;
+
+/* Object and core file sections. */
+
+#define align_power(addr, align) \
+ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align)))
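+
+/* For example (illustrative), align_power ((bfd_vma) 0x1234, 4) rounds
+   up to the next 16 byte boundary: 0x1234 + 0xf == 0x1243, masked with
+   ~(bfd_vma) 0xf, yields 0x1240. */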
+
+typedef struct bfd_section *sec_ptr;
+
+#define bfd_get_section_name(bfd, ptr) ((void) bfd, (ptr)->name)
+#define bfd_get_section_vma(bfd, ptr) ((void) bfd, (ptr)->vma)
+#define bfd_get_section_lma(bfd, ptr) ((void) bfd, (ptr)->lma)
+#define bfd_get_section_alignment(bfd, ptr) ((void) bfd, \
+ (ptr)->alignment_power)
+#define bfd_section_name(bfd, ptr) ((ptr)->name)
+#define bfd_section_size(bfd, ptr) ((ptr)->size)
+#define bfd_get_section_size(ptr) ((ptr)->size)
+#define bfd_section_vma(bfd, ptr) ((ptr)->vma)
+#define bfd_section_lma(bfd, ptr) ((ptr)->lma)
+#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power)
+#define bfd_get_section_flags(bfd, ptr) ((void) bfd, (ptr)->flags)
+#define bfd_get_section_userdata(bfd, ptr) ((void) bfd, (ptr)->userdata)
+
+#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0)
+
+#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE)
+#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE)
+#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE)
+/* Find the address one past the end of SEC. */
+#define bfd_get_section_limit(bfd, sec) \
+ (((bfd)->direction != write_direction && (sec)->rawsize != 0 \
+ ? (sec)->rawsize : (sec)->size) / bfd_octets_per_byte (bfd))
+
+/* Return TRUE if input section SEC has been discarded. */
+#define discarded_section(sec) \
+ (!bfd_is_abs_section (sec) \
+ && bfd_is_abs_section ((sec)->output_section) \
+ && (sec)->sec_info_type != SEC_INFO_TYPE_MERGE \
+ && (sec)->sec_info_type != SEC_INFO_TYPE_JUST_SYMS)
+
+typedef enum bfd_print_symbol
+{
+ bfd_print_symbol_name,
+ bfd_print_symbol_more,
+ bfd_print_symbol_all
+} bfd_print_symbol_type;
+
+/* Information about a symbol that nm needs. */
+
+typedef struct _symbol_info
+{
+ symvalue value;
+ char type;
+ const char *name; /* Symbol name. */
+ unsigned char stab_type; /* Stab type. */
+ char stab_other; /* Stab other. */
+ short stab_desc; /* Stab desc. */
+ const char *stab_name; /* String for stab type. */
+} symbol_info;
+
+/* Get the name of a stabs type code. */
+
+extern const char *bfd_get_stab_name (int);
+
+/* Hash table routines. There is no way to free up a hash table. */
+
+/* An element in the hash table. Most uses will actually use a larger
+ structure, and an instance of this will be the first field. */
+
+struct bfd_hash_entry
+{
+ /* Next entry for this hash code. */
+ struct bfd_hash_entry *next;
+ /* String being hashed. */
+ const char *string;
+ /* Hash code. This is the full hash code, not the index into the
+ table. */
+ unsigned long hash;
+};
+
+/* A hash table. */
+
+struct bfd_hash_table
+{
+ /* The hash array. */
+ struct bfd_hash_entry **table;
+ /* A function used to create new elements in the hash table. The
+ first entry is itself a pointer to an element. When this
+ function is first invoked, this pointer will be NULL. However,
+ having the pointer permits a hierarchy of method functions to be
+ built each of which calls the function in the superclass. Thus
+ each function should be written to allocate a new block of memory
+ only if the argument is NULL. */
+ struct bfd_hash_entry *(*newfunc)
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+ /* An objalloc for this hash table. This is a struct objalloc *,
+ but we use void * to avoid requiring the inclusion of objalloc.h. */
+ void *memory;
+ /* The number of slots in the hash table. */
+ unsigned int size;
+ /* The number of entries in the hash table. */
+ unsigned int count;
+ /* The size of elements. */
+ unsigned int entsize;
+ /* If non-zero, don't grow the hash table. */
+ unsigned int frozen:1;
+};
+
+/* Initialize a hash table. */
+extern bfd_boolean bfd_hash_table_init
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *),
+ unsigned int);
+
+/* Initialize a hash table specifying a size. */
+extern bfd_boolean bfd_hash_table_init_n
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *),
+ unsigned int, unsigned int);
+
+/* Free up a hash table. */
+extern void bfd_hash_table_free
+ (struct bfd_hash_table *);
+
+/* Look up a string in a hash table. If CREATE is TRUE, a new entry
+ will be created for this string if one does not already exist. The
+ COPY argument must be TRUE if this routine should copy the string
+ into newly allocated memory when adding an entry. */
+extern struct bfd_hash_entry *bfd_hash_lookup
+ (struct bfd_hash_table *, const char *, bfd_boolean create,
+ bfd_boolean copy);
+
+/* Insert an entry in a hash table. */
+extern struct bfd_hash_entry *bfd_hash_insert
+ (struct bfd_hash_table *, const char *, unsigned long);
+
+/* Rename an entry in a hash table. */
+extern void bfd_hash_rename
+ (struct bfd_hash_table *, const char *, struct bfd_hash_entry *);
+
+/* Replace an entry in a hash table. */
+extern void bfd_hash_replace
+ (struct bfd_hash_table *, struct bfd_hash_entry *old,
+ struct bfd_hash_entry *nw);
+
+/* Base method for creating a hash table entry. */
+extern struct bfd_hash_entry *bfd_hash_newfunc
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+
+/* Grab some space for a hash table entry. */
+extern void *bfd_hash_allocate
+ (struct bfd_hash_table *, unsigned int);
+
+/* Traverse a hash table in a random order, calling a function on each
+ element. If the function returns FALSE, the traversal stops. The
+ INFO argument is passed to the function. */
+extern void bfd_hash_traverse
+ (struct bfd_hash_table *,
+ bfd_boolean (*) (struct bfd_hash_entry *, void *),
+ void *info);
+
+/* Allows the default size of a hash table to be configured. New hash
+ tables allocated using bfd_hash_table_init will be created with
+ this size. */
+extern unsigned long bfd_hash_set_default_size (unsigned long);
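+
+/* Example (illustrative) of the plain-entry case:
+
+       struct bfd_hash_table table;
+       if (bfd_hash_table_init (&table, bfd_hash_newfunc,
+                                sizeof (struct bfd_hash_entry)))
+         {
+           struct bfd_hash_entry *ent
+             = bfd_hash_lookup (&table, "symbol", TRUE, TRUE);
+           ...
+           bfd_hash_table_free (&table);
+         }
+
+   Each element here is a bare bfd_hash_entry; clients needing extra
+   state embed bfd_hash_entry as the first field of a larger structure
+   and pass their own newfunc and entsize instead. */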
+
+/* This structure is used to keep track of stabs in sections
+ information while linking. */
+
+struct stab_info
+{
+ /* A hash table used to hold stabs strings. */
+ struct bfd_strtab_hash *strings;
+ /* The header file hash table. */
+ struct bfd_hash_table includes;
+ /* The first .stabstr section. */
+ struct bfd_section *stabstr;
+};
+
+#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table
+
+/* User program access to BFD facilities. */
+
+/* Direct I/O routines, for programs which know more about the object
+ file than BFD does. Use higher level routines if possible. */
+
+extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *);
+extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *);
+extern int bfd_seek (bfd *, file_ptr, int);
+extern file_ptr bfd_tell (bfd *);
+extern int bfd_flush (bfd *);
+extern int bfd_stat (bfd *, struct stat *);
+
+/* Deprecated old routines. */
+#if __GNUC__
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#else
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#endif
+extern void warn_deprecated (const char *, const char *, int, const char *);
+
+/* Cast from const char * to char * so that caller can assign to
+ a char * without a warning. */
+#define bfd_get_filename(abfd) ((char *) (abfd)->filename)
+#define bfd_get_cacheable(abfd) ((abfd)->cacheable)
+#define bfd_get_format(abfd) ((abfd)->format)
+#define bfd_get_target(abfd) ((abfd)->xvec->name)
+#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour)
+#define bfd_family_coff(abfd) \
+ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \
+ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour)
+#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
+#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_header_big_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG)
+#define bfd_header_little_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_get_file_flags(abfd) ((abfd)->flags)
+#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags)
+#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags)
+#define bfd_my_archive(abfd) ((abfd)->my_archive)
+#define bfd_has_map(abfd) ((abfd)->has_armap)
+#define bfd_is_thin_archive(abfd) ((abfd)->is_thin_archive)
+
+#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types)
+#define bfd_usrdata(abfd) ((abfd)->usrdata)
+
+#define bfd_get_start_address(abfd) ((abfd)->start_address)
+#define bfd_get_symcount(abfd) ((abfd)->symcount)
+#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols)
+#define bfd_count_sections(abfd) ((abfd)->section_count)
+
+#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount)
+
+#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char)
+
+#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE)
+
+extern bfd_boolean bfd_cache_close
+ (bfd *abfd);
+/* NB: This declaration should match the autogenerated one in libbfd.h. */
+
+extern bfd_boolean bfd_cache_close_all (void);
+
+extern bfd_boolean bfd_record_phdr
+ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma,
+ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **);
+
+/* Byte swapping routines. */
+
+bfd_uint64_t bfd_getb64 (const void *);
+bfd_uint64_t bfd_getl64 (const void *);
+bfd_int64_t bfd_getb_signed_64 (const void *);
+bfd_int64_t bfd_getl_signed_64 (const void *);
+bfd_vma bfd_getb32 (const void *);
+bfd_vma bfd_getl32 (const void *);
+bfd_signed_vma bfd_getb_signed_32 (const void *);
+bfd_signed_vma bfd_getl_signed_32 (const void *);
+bfd_vma bfd_getb16 (const void *);
+bfd_vma bfd_getl16 (const void *);
+bfd_signed_vma bfd_getb_signed_16 (const void *);
+bfd_signed_vma bfd_getl_signed_16 (const void *);
+void bfd_putb64 (bfd_uint64_t, void *);
+void bfd_putl64 (bfd_uint64_t, void *);
+void bfd_putb32 (bfd_vma, void *);
+void bfd_putl32 (bfd_vma, void *);
+void bfd_putb16 (bfd_vma, void *);
+void bfd_putl16 (bfd_vma, void *);
+
+/* Byte swapping routines which take size and endianness as arguments. */
+
+bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean);
+void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean);
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct ecoff_debug_info;
+struct ecoff_debug_swap;
+struct ecoff_extr;
+struct bfd_symbol;
+struct bfd_link_info;
+struct bfd_link_hash_entry;
+struct bfd_section_already_linked;
+struct bfd_elf_version_tree;
+#endif
+
+extern bfd_boolean bfd_section_already_linked_table_init (void);
+extern void bfd_section_already_linked_table_free (void);
+extern bfd_boolean _bfd_handle_already_linked
+ (struct bfd_section *, struct bfd_section_already_linked *,
+ struct bfd_link_info *);
+
+/* Externally visible ECOFF routines. */
+
+extern bfd_vma bfd_ecoff_get_gp_value
+ (bfd * abfd);
+extern bfd_boolean bfd_ecoff_set_gp_value
+ (bfd *abfd, bfd_vma gp_value);
+extern bfd_boolean bfd_ecoff_set_regmasks
+ (bfd *abfd, unsigned long gprmask, unsigned long fprmask,
+ unsigned long *cprmask);
+extern void *bfd_ecoff_debug_init
+ (bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern void bfd_ecoff_debug_free
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct ecoff_debug_info *input_debug,
+ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate_other
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_externals
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, bfd_boolean relocatable,
+ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *),
+ void (*set_index) (struct bfd_symbol *, bfd_size_type));
+extern bfd_boolean bfd_ecoff_debug_one_external
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, const char *name,
+ struct ecoff_extr *esym);
+extern bfd_size_type bfd_ecoff_debug_size
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap);
+extern bfd_boolean bfd_ecoff_write_debug
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, file_ptr where);
+extern bfd_boolean bfd_ecoff_write_accumulated_debug
+ (void *handle, bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap,
+ struct bfd_link_info *info, file_ptr where);
+
+/* Externally visible ELF routines. */
+
+struct bfd_link_needed_list
+{
+ struct bfd_link_needed_list *next;
+ bfd *by;
+ const char *name;
+};
+
+enum dynamic_lib_link_class {
+ DYN_NORMAL = 0,
+ DYN_AS_NEEDED = 1,
+ DYN_DT_NEEDED = 2,
+ DYN_NO_ADD_NEEDED = 4,
+ DYN_NO_NEEDED = 8
+};
+
+enum notice_asneeded_action {
+ notice_as_needed,
+ notice_not_needed,
+ notice_needed
+};
+
+extern bfd_boolean bfd_elf_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *, bfd_boolean,
+ bfd_boolean);
+extern struct bfd_link_needed_list *bfd_elf_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_get_bfd_needed_list
+ (bfd *, struct bfd_link_needed_list **);
+extern bfd_boolean bfd_elf_size_dynamic_sections
+ (bfd *, const char *, const char *, const char *, const char *, const char *,
+ const char * const *, struct bfd_link_info *, struct bfd_section **);
+extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr
+ (bfd *, struct bfd_link_info *);
+extern void bfd_elf_set_dt_needed_name
+ (bfd *, const char *);
+extern const char *bfd_elf_get_dt_soname
+ (bfd *);
+extern void bfd_elf_set_dyn_lib_class
+ (bfd *, enum dynamic_lib_link_class);
+extern int bfd_elf_get_dyn_lib_class
+ (bfd *);
+extern struct bfd_link_needed_list *bfd_elf_get_runpath_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_discard_info
+ (bfd *, struct bfd_link_info *);
+extern unsigned int _bfd_elf_default_action_discarded
+ (struct bfd_section *);
+
+/* Return an upper bound on the number of bytes required to store a
+ copy of ABFD's program header table entries. Return -1 if an error
+ occurs; bfd_get_error will return an appropriate code. */
+extern long bfd_get_elf_phdr_upper_bound
+ (bfd *abfd);
+
+/* Copy ABFD's program header table entries to *PHDRS. The entries
+ will be stored as an array of Elf_Internal_Phdr structures, as
+ defined in include/elf/internal.h. To find out how large the
+ buffer needs to be, call bfd_get_elf_phdr_upper_bound.
+
+ Return the number of program header table entries read, or -1 if an
+ error occurs; bfd_get_error will return an appropriate code. */
+extern int bfd_get_elf_phdrs
+ (bfd *abfd, void *phdrs);
+
+/* Create a new BFD as if by bfd_openr. Rather than opening a file,
+ reconstruct an ELF file by reading the segments out of remote memory
+ based on the ELF file header at EHDR_VMA and the ELF program headers it
+ points to. If not null, *LOADBASEP is filled in with the difference
+ between the VMAs from which the segments were read, and the VMAs the
+ file headers (and hence BFD's idea of each section's VMA) put them at.
+
+ The function TARGET_READ_MEMORY is called to copy LEN bytes from the
+ remote memory at target address VMA into the local buffer at MYADDR; it
+ should return zero on success or an `errno' code on failure. TEMPL must
+ be a BFD for an ELF target with the word size and byte order found in
+ the remote memory. */
+extern bfd *bfd_elf_bfd_from_remote_memory
+ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep,
+ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr,
+ bfd_size_type len));
+
+extern struct bfd_section *_bfd_elf_tls_setup
+ (bfd *, struct bfd_link_info *);
+
+extern struct bfd_section *
+_bfd_nearby_section (bfd *, struct bfd_section *, bfd_vma);
+
+extern void _bfd_fix_excluded_sec_syms
+ (bfd *, struct bfd_link_info *);
+
+extern unsigned bfd_m68k_mach_to_features (int);
+
+extern int bfd_m68k_features_to_mach (unsigned);
+
+extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+extern void bfd_elf_m68k_set_target_options (struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_bfin_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+extern bfd_boolean bfd_cr16_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+/* SunOS shared library support routines for the linker. */
+
+extern struct bfd_link_needed_list *bfd_sunos_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sunos_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_sunos_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, struct bfd_section **,
+ struct bfd_section **, struct bfd_section **);
+
+/* Linux shared library support routines for the linker. */
+
+extern bfd_boolean bfd_i386linux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_m68klinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sparclinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+
+/* mmap hacks */
+
+struct _bfd_window_internal;
+typedef struct _bfd_window_internal bfd_window_internal;
+
+typedef struct _bfd_window
+{
+ /* What the user asked for. */
+ void *data;
+ bfd_size_type size;
+ /* The actual window used by BFD. Small user-requested read-only
+ regions sharing a page may share a single window into the object
+ file. Read-write versions shouldn't until I've fixed things to
+ keep track of which portions have been claimed by the
+ application; don't want to give the same region back when the
+ application wants two writable copies! */
+ struct _bfd_window_internal *i;
+}
+bfd_window;
+
+extern void bfd_init_window
+ (bfd_window *);
+extern void bfd_free_window
+ (bfd_window *);
+extern bfd_boolean bfd_get_file_window
+ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean);
+
+/* XCOFF support routines for the linker. */
+
+extern bfd_boolean bfd_xcoff_split_import_path
+ (bfd *, const char *, const char **, const char **);
+extern bfd_boolean bfd_xcoff_set_archive_import_path
+ (struct bfd_link_info *, bfd *, const char *);
+extern bfd_boolean bfd_xcoff_link_record_set
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type);
+extern bfd_boolean bfd_xcoff_import_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma,
+ const char *, const char *, const char *, unsigned int);
+extern bfd_boolean bfd_xcoff_export_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *);
+extern bfd_boolean bfd_xcoff_link_count_reloc
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, const char *, const char *,
+ unsigned long, unsigned long, unsigned long, bfd_boolean,
+ int, bfd_boolean, unsigned int, struct bfd_section **, bfd_boolean);
+extern bfd_boolean bfd_xcoff_link_generate_rtinit
+ (bfd *, const char *, const char *, bfd_boolean);
+
+/* XCOFF support routines for ar. */
+extern bfd_boolean bfd_xcoff_ar_archive_set_magic
+ (bfd *, char *);
+
+/* Externally visible COFF routines. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct internal_syment;
+union internal_auxent;
+#endif
+
+extern bfd_boolean bfd_coff_get_syment
+ (bfd *, struct bfd_symbol *, struct internal_syment *);
+
+extern bfd_boolean bfd_coff_get_auxent
+ (bfd *, struct bfd_symbol *, int, union internal_auxent *);
+
+extern bfd_boolean bfd_coff_set_symbol_class
+ (bfd *, struct bfd_symbol *, unsigned int);
+
+extern bfd_boolean bfd_m68k_coff_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **);
+
+/* ARM VFP11 erratum workaround support. */
+typedef enum
+{
+ BFD_ARM_VFP11_FIX_DEFAULT,
+ BFD_ARM_VFP11_FIX_NONE,
+ BFD_ARM_VFP11_FIX_SCALAR,
+ BFD_ARM_VFP11_FIX_VECTOR
+} bfd_arm_vfp11_fix;
+
+extern void bfd_elf32_arm_init_maps
+ (bfd *);
+
+extern void bfd_elf32_arm_set_vfp11_fix
+ (bfd *, struct bfd_link_info *);
+
+extern void bfd_elf32_arm_set_cortex_a8_fix
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_vfp11_erratum_scan
+ (bfd *, struct bfd_link_info *);
+
+extern void bfd_elf32_arm_vfp11_fix_veneer_locations
+ (bfd *, struct bfd_link_info *);
+
+/* ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* PE ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_pe_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_pe_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *);
+
+void bfd_elf32_arm_set_target_relocs
+ (bfd *, struct bfd_link_info *, int, char *, int, int, bfd_arm_vfp11_fix,
+ int, int, int, int, int);
+
+extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM mapping symbol support */
+#define BFD_ARM_SPECIAL_SYM_TYPE_MAP (1 << 0)
+#define BFD_ARM_SPECIAL_SYM_TYPE_TAG (1 << 1)
+#define BFD_ARM_SPECIAL_SYM_TYPE_OTHER (1 << 2)
+#define BFD_ARM_SPECIAL_SYM_TYPE_ANY (~0)
+extern bfd_boolean bfd_is_arm_special_symbol_name
+ (const char * name, int type);
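+
+/* Example (illustrative): the ARM EABI mapping symbols are named "$a",
+   "$t" and "$d", so
+       bfd_is_arm_special_symbol_name ("$d", BFD_ARM_SPECIAL_SYM_TYPE_MAP)
+   is expected to return TRUE. */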
+
+extern void bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *, int);
+
+/* ARM Note section processing. */
+extern bfd_boolean bfd_arm_merge_machines
+ (bfd *, bfd *);
+
+extern bfd_boolean bfd_arm_update_notes
+ (bfd *, const char *);
+
+extern unsigned int bfd_arm_get_mach_from_notes
+ (bfd *, const char *);
+
+/* ARM stub generation support. Called from the linker. */
+extern int elf32_arm_setup_section_lists
+ (bfd *, struct bfd_link_info *);
+extern void elf32_arm_next_input_section
+ (struct bfd_link_info *, struct bfd_section *);
+extern bfd_boolean elf32_arm_size_stubs
+ (bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma,
+ struct bfd_section * (*) (const char *, struct bfd_section *), void (*) (void));
+extern bfd_boolean elf32_arm_build_stubs
+ (struct bfd_link_info *);
+
+/* ARM unwind section editing support. */
+extern bfd_boolean elf32_arm_fix_exidx_coverage
+(struct bfd_section **, unsigned int, struct bfd_link_info *, bfd_boolean);
+
+/* C6x unwind section editing support. */
+extern bfd_boolean elf32_tic6x_fix_exidx_coverage
+(struct bfd_section **, unsigned int, struct bfd_link_info *, bfd_boolean);
+
+/* PowerPC @tls opcode transform/validate. */
+extern unsigned int _bfd_elf_ppc_at_tls_transform
+ (unsigned int, unsigned int);
+/* PowerPC @tprel opcode transform/validate. */
+extern unsigned int _bfd_elf_ppc_at_tprel_transform
+ (unsigned int, unsigned int);
+
+/* TI COFF load page support. */
+extern void bfd_ticoff_set_section_load_page
+ (struct bfd_section *, int);
+
+extern int bfd_ticoff_get_section_load_page
+ (struct bfd_section *);
+
+/* H8/300 functions. */
+extern bfd_vma bfd_h8300_pad_address
+ (bfd *, bfd_vma);
+
+/* IA64 Itanium code generation. Called from linker. */
+extern void bfd_elf32_ia64_after_parse
+ (int);
+
+extern void bfd_elf64_ia64_after_parse
+ (int);
+
+/* This structure is used for a comdat section, as in PE. A comdat
+ section is associated with a particular symbol. When the linker
+ sees a comdat section, it keeps only one of the sections with a
+ given name and associated with a given symbol. */
+
+struct coff_comdat_info
+{
+ /* The name of the symbol associated with a comdat section. */
+ const char *name;
+
+ /* The local symbol table index of the symbol associated with a
+ comdat section. This is only meaningful to the object file format
+ specific code; it is not an index into the list returned by
+ bfd_canonicalize_symtab. */
+ long symbol;
+};
+
+extern struct coff_comdat_info *bfd_coff_get_comdat_section
+ (bfd *, struct bfd_section *);
+
+/* Extracted from init.c. */
+void bfd_init (void);
+
+/* Extracted from opncls.c. */
+extern unsigned int bfd_use_reserved_id;
+bfd *bfd_fopen (const char *filename, const char *target,
+ const char *mode, int fd);
+
+bfd *bfd_openr (const char *filename, const char *target);
+
+bfd *bfd_fdopenr (const char *filename, const char *target, int fd);
+
+bfd *bfd_openstreamr (const char *, const char *, void *);
+
+bfd *bfd_openr_iovec (const char *filename, const char *target,
+ void *(*open_func) (struct bfd *nbfd,
+ void *open_closure),
+ void *open_closure,
+ file_ptr (*pread_func) (struct bfd *nbfd,
+ void *stream,
+ void *buf,
+ file_ptr nbytes,
+ file_ptr offset),
+ int (*close_func) (struct bfd *nbfd,
+ void *stream),
+ int (*stat_func) (struct bfd *abfd,
+ void *stream,
+ struct stat *sb));
+
+bfd *bfd_openw (const char *filename, const char *target);
+
+bfd_boolean bfd_close (bfd *abfd);
+
+bfd_boolean bfd_close_all_done (bfd *);
+
+bfd *bfd_create (const char *filename, bfd *templ);
+
+bfd_boolean bfd_make_writable (bfd *abfd);
+
+bfd_boolean bfd_make_readable (bfd *abfd);
+
+void *bfd_alloc (bfd *abfd, bfd_size_type wanted);
+
+void *bfd_zalloc (bfd *abfd, bfd_size_type wanted);
+
+unsigned long bfd_calc_gnu_debuglink_crc32
+ (unsigned long crc, const unsigned char *buf, bfd_size_type len);
+
+char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir);
+
+struct bfd_section *bfd_create_gnu_debuglink_section
+ (bfd *abfd, const char *filename);
+
+bfd_boolean bfd_fill_in_gnu_debuglink_section
+ (bfd *abfd, struct bfd_section *sect, const char *filename);
+
+/* Extracted from libbfd.c. */
+
+/* Byte swapping macros for user section data. */
+
+#define bfd_put_8(abfd, val, ptr) \
+ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff))
+#define bfd_put_signed_8 \
+ bfd_put_8
+#define bfd_get_8(abfd, ptr) \
+ (*(const unsigned char *) (ptr) & 0xff)
+#define bfd_get_signed_8(abfd, ptr) \
+ (((*(const unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80)
+
+#define bfd_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx16, ((val),(ptr)))
+#define bfd_put_signed_16 \
+ bfd_put_16
+#define bfd_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx16, (ptr))
+#define bfd_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_16, (ptr))
+
+#define bfd_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx32, ((val),(ptr)))
+#define bfd_put_signed_32 \
+ bfd_put_32
+#define bfd_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx32, (ptr))
+#define bfd_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_32, (ptr))
+
+#define bfd_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx64, ((val), (ptr)))
+#define bfd_put_signed_64 \
+ bfd_put_64
+#define bfd_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx64, (ptr))
+#define bfd_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_64, (ptr))
+
+#define bfd_get(bits, abfd, ptr) \
+ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \
+ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \
+ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \
+ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \
+ : (abort (), (bfd_vma) - 1))
+
+#define bfd_put(bits, abfd, val, ptr) \
+ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \
+ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \
+ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \
+ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \
+ : (abort (), (void) 0))
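+
+/* Example (illustrative; ABFD is assumed to be an open bfd): to
+   round-trip a 32 bit value through a buffer in ABFD's byte order:
+
+       bfd_byte buf[4];
+       bfd_put_32 (abfd, (bfd_vma) 0xdeadbeef, buf);
+       x = bfd_get_32 (abfd, buf);
+
+   The get/put macros dispatch through the target vector, so the same
+   code handles big- and little-endian targets alike. */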
+
+
+/* Byte swapping macros for file header data. */
+
+#define bfd_h_put_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_put_signed_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_get_8(abfd, ptr) \
+ bfd_get_8 (abfd, ptr)
+#define bfd_h_get_signed_8(abfd, ptr) \
+ bfd_get_signed_8 (abfd, ptr)
+
+#define bfd_h_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx16, (val, ptr))
+#define bfd_h_put_signed_16 \
+ bfd_h_put_16
+#define bfd_h_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx16, (ptr))
+#define bfd_h_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr))
+
+#define bfd_h_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx32, (val, ptr))
+#define bfd_h_put_signed_32 \
+ bfd_h_put_32
+#define bfd_h_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx32, (ptr))
+#define bfd_h_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr))
+
+#define bfd_h_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx64, (val, ptr))
+#define bfd_h_put_signed_64 \
+ bfd_h_put_64
+#define bfd_h_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx64, (ptr))
+#define bfd_h_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr))
+
+/* Aliases for the above, which should eventually go away. */
+
+#define H_PUT_64 bfd_h_put_64
+#define H_PUT_32 bfd_h_put_32
+#define H_PUT_16 bfd_h_put_16
+#define H_PUT_8 bfd_h_put_8
+#define H_PUT_S64 bfd_h_put_signed_64
+#define H_PUT_S32 bfd_h_put_signed_32
+#define H_PUT_S16 bfd_h_put_signed_16
+#define H_PUT_S8 bfd_h_put_signed_8
+#define H_GET_64 bfd_h_get_64
+#define H_GET_32 bfd_h_get_32
+#define H_GET_16 bfd_h_get_16
+#define H_GET_8 bfd_h_get_8
+#define H_GET_S64 bfd_h_get_signed_64
+#define H_GET_S32 bfd_h_get_signed_32
+#define H_GET_S16 bfd_h_get_signed_16
+#define H_GET_S8 bfd_h_get_signed_8
+
+
+/* Extracted from bfdio.c. */
+long bfd_get_mtime (bfd *abfd);
+
+file_ptr bfd_get_size (bfd *abfd);
+
+void *bfd_mmap (bfd *abfd, void *addr, bfd_size_type len,
+ int prot, int flags, file_ptr offset,
+ void **map_addr, bfd_size_type *map_len);
+
+/* Extracted from bfdwin.c. */
+/* Extracted from section.c. */
+typedef struct bfd_section
+{
+ /* The name of the section; the name isn't a copy, the pointer is
+ the same as that passed to bfd_make_section. */
+ const char *name;
+
+ /* A unique sequence number. */
+ int id;
+
+ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */
+ int index;
+
+ /* The next section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *next;
+
+ /* The previous section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *prev;
+
+ /* The field flags contains attributes of the section. Some
+ flags are read in from the object file, and some are
+ synthesized from other information. */
+ flagword flags;
+
+#define SEC_NO_FLAGS 0x000
+
+ /* Tells the OS to allocate space for this section when loading.
+ This is clear for a section containing debug information only. */
+#define SEC_ALLOC 0x001
+
+ /* Tells the OS to load the section from the file when loading.
+ This is clear for a .bss section. */
+#define SEC_LOAD 0x002
+
+ /* The section contains data still to be relocated, so there is
+ some relocation information too. */
+#define SEC_RELOC 0x004
+
+ /* A signal to the OS that the section contains read only data. */
+#define SEC_READONLY 0x008
+
+ /* The section contains code only. */
+#define SEC_CODE 0x010
+
+ /* The section contains data only. */
+#define SEC_DATA 0x020
+
+ /* The section will reside in ROM. */
+#define SEC_ROM 0x040
+
+ /* The section contains constructor information. This section
+ type is used by the linker to create lists of constructors and
+ destructors used by <<g++>>. When a back end sees a symbol
+ which should be used in a constructor list, it creates a new
+ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches
+ the symbol to it, and builds a relocation. To build the lists
+ of constructors, all the linker has to do is catenate all the
+ sections called <<__CTOR_LIST__>> and relocate the data
+ contained within - exactly the operations it would perform on
+ standard data. */
+#define SEC_CONSTRUCTOR 0x080
+
+ /* The section has contents - a data section could be
+ <<SEC_ALLOC>> | <<SEC_HAS_CONTENTS>>; a debug section could be
+ <<SEC_HAS_CONTENTS>>. */
+#define SEC_HAS_CONTENTS 0x100
+
+ /* An instruction to the linker to not output the section
+ even if it has information which would normally be written. */
+#define SEC_NEVER_LOAD 0x200
+
+ /* The section contains thread local data. */
+#define SEC_THREAD_LOCAL 0x400
+
+ /* The section has GOT references. This flag is only for the
+ linker, and is currently only used by the elf32-hppa back end.
+ It will be set if global offset table references were detected
+ in this section, which indicate to the linker that the section
+ contains PIC code, and must be handled specially when doing a
+ static link. */
+#define SEC_HAS_GOT_REF 0x800
+
+ /* The section contains common symbols (symbols may be defined
+ multiple times, the value of a symbol is the amount of
+ space it requires, and the largest symbol value is the one
+ used). Most targets have exactly one of these (which we
+ translate to bfd_com_section_ptr), but ECOFF has two. */
+#define SEC_IS_COMMON 0x1000
+
+ /* The section contains only debugging information. For
+ example, this is set for ELF .debug and .stab sections.
+ strip tests this flag to see if a section can be
+ discarded. */
+#define SEC_DEBUGGING 0x2000
+
+ /* The contents of this section are held in memory pointed to
+ by the contents field. This is checked by bfd_get_section_contents,
+ and the data is retrieved from memory if appropriate. */
+#define SEC_IN_MEMORY 0x4000
+
+ /* The contents of this section are to be excluded by the
+ linker for executable and shared objects unless those
+ objects are to be further relocated. */
+#define SEC_EXCLUDE 0x8000
+
+ /* The contents of this section are to be sorted based on the sum of
+ the symbol and addend values specified by the associated relocation
+ entries. Entries without associated relocation entries will be
+ appended to the end of the section in an unspecified order. */
+#define SEC_SORT_ENTRIES 0x10000
+
+ /* When linking, duplicate sections of the same name should be
+ discarded, rather than being combined into a single section as
+ is usually done. This is similar to how common symbols are
+ handled. See SEC_LINK_DUPLICATES below. */
+#define SEC_LINK_ONCE 0x20000
+
+ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker
+ should handle duplicate sections. */
+#define SEC_LINK_DUPLICATES 0xc0000
+
+ /* This value for SEC_LINK_DUPLICATES means that duplicate
+ sections with the same name should simply be discarded. */
+#define SEC_LINK_DUPLICATES_DISCARD 0x0
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if there are any duplicate sections, although
+ it should still only link one copy. */
+#define SEC_LINK_DUPLICATES_ONE_ONLY 0x40000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections are a different size. */
+#define SEC_LINK_DUPLICATES_SAME_SIZE 0x80000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections contain different
+ contents. */
+#define SEC_LINK_DUPLICATES_SAME_CONTENTS \
+ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE)
+
+ /* This section was created by the linker as part of dynamic
+ relocation or other arcane processing. It is skipped when
+ going through the first-pass output, trusting that someone
+ else up the line will take care of it later. */
+#define SEC_LINKER_CREATED 0x100000
+
+ /* This section should not be subject to garbage collection.
+ Also set to inform the linker that this section should not be
+ listed in the link map as discarded. */
+#define SEC_KEEP 0x200000
+
+ /* This section contains "short" data, and should be placed
+ "near" the GP. */
+#define SEC_SMALL_DATA 0x400000
+
+ /* Attempt to merge identical entities in the section.
+ Entity size is given in the entsize field. */
+#define SEC_MERGE 0x800000
+
+ /* If given with SEC_MERGE, entities to merge are zero terminated
+ strings where entsize specifies character size instead of fixed
+ size entries. */
+#define SEC_STRINGS 0x1000000
+
+ /* This section contains data about section groups. */
+#define SEC_GROUP 0x2000000
+
+ /* The section is a COFF shared library section. This flag is
+ only for the linker. If this type of section appears in
+ the input file, the linker must copy it to the output file
+ without changing the vma or size. FIXME: Although this
+ was originally intended to be general, it really is COFF
+ specific (and the flag was renamed to indicate this). It
+ might be cleaner to have some more general mechanism to
+ allow the back end to control what the linker does with
+ sections. */
+#define SEC_COFF_SHARED_LIBRARY 0x4000000
+
+ /* This input section should be copied to output in reverse order
+ as an array of pointers. This is for ELF linker internal use
+ only. */
+#define SEC_ELF_REVERSE_COPY 0x4000000
+
+ /* This section contains data which may be shared with other
+ executables or shared objects. This is for COFF only. */
+#define SEC_COFF_SHARED 0x8000000
+
+ /* When a section with this flag is being linked, then if the size of
+ the input section is less than a page, it should not cross a page
+ boundary. If the size of the input section is one page or more,
+ it should be aligned on a page boundary. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_BLOCK 0x10000000
+
+ /* Conditionally link this section; do not link if there are no
+ references found to any symbol in the section. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_CLINK 0x20000000
+
+ /* Indicate that the section has the no-read flag set. This happens
+ when the memory read flag isn't set. */
+#define SEC_COFF_NOREAD 0x40000000
+
+ /* End of section flags. */
+
+ /* Some internal packed boolean fields. */
+
+ /* See the vma field. */
+ unsigned int user_set_vma : 1;
+
+ /* A mark flag used by some of the linker backends. */
+ unsigned int linker_mark : 1;
+
+ /* Another mark flag used by some of the linker backends. Set for
+ output sections that have an input section. */
+ unsigned int linker_has_input : 1;
+
+ /* Mark flag used by some linker backends for garbage collection. */
+ unsigned int gc_mark : 1;
+
+ /* Section compression status. */
+ unsigned int compress_status : 2;
+#define COMPRESS_SECTION_NONE 0
+#define COMPRESS_SECTION_DONE 1
+#define DECOMPRESS_SECTION_SIZED 2
+
+ /* The following flags are used by the ELF linker. */
+
+ /* Mark sections which have been allocated to segments. */
+ unsigned int segment_mark : 1;
+
+ /* Type of sec_info information. */
+ unsigned int sec_info_type:3;
+#define SEC_INFO_TYPE_NONE 0
+#define SEC_INFO_TYPE_STABS 1
+#define SEC_INFO_TYPE_MERGE 2
+#define SEC_INFO_TYPE_EH_FRAME 3
+#define SEC_INFO_TYPE_JUST_SYMS 4
+
+ /* Nonzero if this section uses RELA relocations, rather than REL. */
+ unsigned int use_rela_p:1;
+
+ /* Bits used by various backends. The generic code doesn't touch
+ these fields. */
+
+ unsigned int sec_flg0:1;
+ unsigned int sec_flg1:1;
+ unsigned int sec_flg2:1;
+ unsigned int sec_flg3:1;
+ unsigned int sec_flg4:1;
+ unsigned int sec_flg5:1;
+
+ /* End of internal packed boolean fields. */
+
+ /* The virtual memory address of the section - where it will be
+ at run time. The symbols are relocated against this. The
+ user_set_vma flag is maintained by bfd; if it's not set, the
+ backend can assign addresses (for example, in <<a.out>>, where
+ the default address for <<.data>> is dependent on the specific
+ target and various flags). */
+ bfd_vma vma;
+
+ /* The load address of the section - where it would be in a
+ rom image; really only used for writing section header
+ information. */
+ bfd_vma lma;
+
+ /* The size of the section in octets, as it will be output.
+ Contains a value even if the section has no contents (e.g., the
+ size of <<.bss>>). */
+ bfd_size_type size;
+
+ /* For input sections, the original size on disk of the section, in
+ octets. This field should be set for any section whose size is
+ changed by linker relaxation. It is required for sections where
+ the linker relaxation scheme doesn't cache altered section and
+ reloc contents (stabs, eh_frame, SEC_MERGE, some coff relaxing
+ targets), and thus the original size needs to be kept to read the
+ section multiple times. For output sections, rawsize holds the
+ section size calculated on a previous linker relaxation pass. */
+ bfd_size_type rawsize;
+
+ /* The compressed size of the section in octets. */
+ bfd_size_type compressed_size;
+
+ /* Relaxation table. */
+ struct relax_table *relax;
+
+ /* Count of used relaxation table entries. */
+ int relax_count;
+
+
+ /* If this section is going to be output, then this value is the
+ offset in *bytes* into the output section of the first byte in the
+ input section (byte ==> smallest addressable unit on the
+ target). In most cases, if this was going to start at the
+ 100th octet (8-bit quantity) in the output section, this value
+ would be 100. However, if the target byte size is 16 bits
+ (bfd_octets_per_byte is "2"), this value would be 50. */
+ bfd_vma output_offset;
+
+ /* The output section through which to map on output. */
+ struct bfd_section *output_section;
+
+ /* The alignment requirement of the section, as an exponent of 2 -
+ e.g., 3 aligns to 2^3 (or 8). */
+ unsigned int alignment_power;
+
+ /* If an input section, a pointer to a vector of relocation
+ records for the data in this section. */
+ struct reloc_cache_entry *relocation;
+
+ /* If an output section, a pointer to a vector of pointers to
+ relocation records for the data in this section. */
+ struct reloc_cache_entry **orelocation;
+
+ /* The number of relocation records in one of the above. */
+ unsigned reloc_count;
+
+ /* Information below is back end specific - and not always used
+ or updated. */
+
+ /* File position of section data. */
+ file_ptr filepos;
+
+ /* File position of relocation info. */
+ file_ptr rel_filepos;
+
+ /* File position of line data. */
+ file_ptr line_filepos;
+
+ /* Pointer to data for applications. */
+ void *userdata;
+
+ /* If the SEC_IN_MEMORY flag is set, this points to the actual
+ contents. */
+ unsigned char *contents;
+
+ /* Attached line number information. */
+ alent *lineno;
+
+ /* Number of line number records. */
+ unsigned int lineno_count;
+
+ /* Entity size for merging purposes. */
+ unsigned int entsize;
+
+ /* Points to the kept section if this section is a link-once section,
+ and is discarded. */
+ struct bfd_section *kept_section;
+
+ /* When a section is being output, this value changes as more
+ linenumbers are written out. */
+ file_ptr moving_line_filepos;
+
+ /* What the section number is in the target world. */
+ int target_index;
+
+ void *used_by_bfd;
+
+ /* If this is a constructor section then here is a list of the
+ relocations created to relocate items within it. */
+ struct relent_chain *constructor_chain;
+
+ /* The BFD which owns the section. */
+ bfd *owner;
+
+ /* A symbol which points at this section only. */
+ struct bfd_symbol *symbol;
+ struct bfd_symbol **symbol_ptr_ptr;
+
+ /* Early in the link process, map_head and map_tail are used to build
+ a list of input sections attached to an output section. Later,
+ output sections use these fields for a list of bfd_link_order
+ structs. */
+ union {
+ struct bfd_link_order *link_order;
+ struct bfd_section *s;
+ } map_head, map_tail;
+} asection;
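+
+#if 0 /* Illustrative sketch, not part of bfd.h: a typical SEC_* flag
+ test - does sec occupy space in the loaded image without being pure
+ debug info? load_me is a hypothetical client function. */
+ if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
+ && (sec->flags & SEC_DEBUGGING) == 0)
+ load_me (sec);
+#endif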
+
+/* Relax table contains information about instructions which can
+ be removed by relaxation -- replacing a long address with a
+ short address. */
+struct relax_table {
+ /* Address where bytes may be deleted. */
+ bfd_vma addr;
+
+ /* Number of bytes to be deleted. */
+ int size;
+};
+
+/* These sections are global, and are managed by BFD. The application
+ and target back end are not permitted to change the values in
+ these sections. */
+extern asection std_section[4];
+
+#define BFD_ABS_SECTION_NAME "*ABS*"
+#define BFD_UND_SECTION_NAME "*UND*"
+#define BFD_COM_SECTION_NAME "*COM*"
+#define BFD_IND_SECTION_NAME "*IND*"
+
+/* Pointer to the common section. */
+#define bfd_com_section_ptr (&std_section[0])
+/* Pointer to the undefined section. */
+#define bfd_und_section_ptr (&std_section[1])
+/* Pointer to the absolute section. */
+#define bfd_abs_section_ptr (&std_section[2])
+/* Pointer to the indirect section. */
+#define bfd_ind_section_ptr (&std_section[3])
+
+#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr)
+#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr)
+#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr)
+
+#define bfd_is_const_section(SEC) \
+ ( ((SEC) == bfd_abs_section_ptr) \
+ || ((SEC) == bfd_und_section_ptr) \
+ || ((SEC) == bfd_com_section_ptr) \
+ || ((SEC) == bfd_ind_section_ptr))
+
+/* Macros to handle insertion and deletion of a bfd's sections. These
+ only handle the list pointers, i.e., they do not adjust section_count,
+ target_index, etc. */
+#define bfd_section_list_remove(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ asection *_next = _s->next; \
+ asection *_prev = _s->prev; \
+ if (_prev) \
+ _prev->next = _next; \
+ else \
+ (ABFD)->sections = _next; \
+ if (_next) \
+ _next->prev = _prev; \
+ else \
+ (ABFD)->section_last = _prev; \
+ } \
+ while (0)
+#define bfd_section_list_append(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->next = NULL; \
+ if (_abfd->section_last) \
+ { \
+ _s->prev = _abfd->section_last; \
+ _abfd->section_last->next = _s; \
+ } \
+ else \
+ { \
+ _s->prev = NULL; \
+ _abfd->sections = _s; \
+ } \
+ _abfd->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_prepend(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->prev = NULL; \
+ if (_abfd->sections) \
+ { \
+ _s->next = _abfd->sections; \
+ _abfd->sections->prev = _s; \
+ } \
+ else \
+ { \
+ _s->next = NULL; \
+ _abfd->section_last = _s; \
+ } \
+ _abfd->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_after(ABFD, A, S) \
+ do \
+ { \
+ asection *_a = A; \
+ asection *_s = S; \
+ asection *_next = _a->next; \
+ _s->next = _next; \
+ _s->prev = _a; \
+ _a->next = _s; \
+ if (_next) \
+ _next->prev = _s; \
+ else \
+ (ABFD)->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_before(ABFD, B, S) \
+ do \
+ { \
+ asection *_b = B; \
+ asection *_s = S; \
+ asection *_prev = _b->prev; \
+ _s->prev = _prev; \
+ _s->next = _b; \
+ _b->prev = _s; \
+ if (_prev) \
+ _prev->next = _s; \
+ else \
+ (ABFD)->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_removed_from_list(ABFD, S) \
+ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S))
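+
+#if 0 /* Illustrative sketch, not part of bfd.h: moving sec to the end
+ of abfd's section list with the macros above. Remember they touch
+ only the list pointers, not section_count or target_index. */
+ bfd_section_list_remove (abfd, sec);
+ bfd_section_list_append (abfd, sec);
+#endif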
+
+#define BFD_FAKE_SECTION(SEC, FLAGS, SYM, NAME, IDX) \
+ /* name, id, index, next, prev, flags, user_set_vma, */ \
+ { NAME, IDX, 0, NULL, NULL, FLAGS, 0, \
+ \
+ /* linker_mark, linker_has_input, gc_mark, compress_status, */ \
+ 0, 0, 1, 0, \
+ \
+ /* segment_mark, sec_info_type, use_rela_p, */ \
+ 0, 0, 0, \
+ \
+ /* sec_flg0, sec_flg1, sec_flg2, sec_flg3, sec_flg4, sec_flg5, */ \
+ 0, 0, 0, 0, 0, 0, \
+ \
+ /* vma, lma, size, rawsize, compressed_size, relax, relax_count, */ \
+ 0, 0, 0, 0, 0, 0, 0, \
+ \
+ /* output_offset, output_section, alignment_power, */ \
+ 0, &SEC, 0, \
+ \
+ /* relocation, orelocation, reloc_count, filepos, rel_filepos, */ \
+ NULL, NULL, 0, 0, 0, \
+ \
+ /* line_filepos, userdata, contents, lineno, lineno_count, */ \
+ 0, NULL, NULL, NULL, 0, \
+ \
+ /* entsize, kept_section, moving_line_filepos, */ \
+ 0, NULL, 0, \
+ \
+ /* target_index, used_by_bfd, constructor_chain, owner, */ \
+ 0, NULL, NULL, NULL, \
+ \
+ /* symbol, symbol_ptr_ptr, */ \
+ (struct bfd_symbol *) SYM, &SEC.symbol, \
+ \
+ /* map_head, map_tail */ \
+ { NULL }, { NULL } \
+ }
+
+void bfd_section_list_clear (bfd *);
+
+asection *bfd_get_section_by_name (bfd *abfd, const char *name);
+
+asection *bfd_get_next_section_by_name (asection *sec);
+
+asection *bfd_get_linker_section (bfd *abfd, const char *name);
+
+asection *bfd_get_section_by_name_if
+ (bfd *abfd,
+ const char *name,
+ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+char *bfd_get_unique_section_name
+ (bfd *abfd, const char *templat, int *count);
+
+asection *bfd_make_section_old_way (bfd *abfd, const char *name);
+
+asection *bfd_make_section_anyway_with_flags
+ (bfd *abfd, const char *name, flagword flags);
+
+asection *bfd_make_section_anyway (bfd *abfd, const char *name);
+
+asection *bfd_make_section_with_flags
+ (bfd *, const char *name, flagword flags);
+
+asection *bfd_make_section (bfd *, const char *name);
+
+bfd_boolean bfd_set_section_flags
+ (bfd *abfd, asection *sec, flagword flags);
+
+void bfd_rename_section
+ (bfd *abfd, asection *sec, const char *newname);
+
+void bfd_map_over_sections
+ (bfd *abfd,
+ void (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
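+
+#if 0 /* Illustrative sketch, not part of bfd.h: visiting every section
+ of a bfd. print_name is a hypothetical callback matching the func
+ signature above; printf needs <stdio.h>. */
+ static void
+ print_name (bfd *abfd, asection *sect, void *obj)
+ {
+ printf ("%s\n", sect->name);
+ }
+
+ bfd_map_over_sections (abfd, print_name, NULL);
+#endif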
+
+asection *bfd_sections_find_if
+ (bfd *abfd,
+ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+bfd_boolean bfd_set_section_size
+ (bfd *abfd, asection *sec, bfd_size_type val);
+
+bfd_boolean bfd_set_section_contents
+ (bfd *abfd, asection *section, const void *data,
+ file_ptr offset, bfd_size_type count);
+
+bfd_boolean bfd_get_section_contents
+ (bfd *abfd, asection *section, void *location, file_ptr offset,
+ bfd_size_type count);
+
+bfd_boolean bfd_malloc_and_get_section
+ (bfd *abfd, asection *section, bfd_byte **buf);
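+
+#if 0 /* Illustrative sketch, not part of bfd.h: slurping a whole
+ section into a malloc'd buffer. On success the caller owns buf and
+ must free it. */
+ bfd_byte *buf;
+ asection *sec = bfd_get_section_by_name (abfd, ".text");
+ if (sec != NULL && bfd_malloc_and_get_section (abfd, sec, &buf))
+ {
+ /* ... use sec->size octets at buf ... */
+ free (buf);
+ }
+#endif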
+
+bfd_boolean bfd_copy_private_section_data
+ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec);
+
+#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \
+ BFD_SEND (obfd, _bfd_copy_private_section_data, \
+ (ibfd, isection, obfd, osection))
+bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec);
+
+bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group);
+
+/* Extracted from archures.c. */
+enum bfd_architecture
+{
+ bfd_arch_unknown, /* File arch not known. */
+ bfd_arch_obscure, /* Arch known, not one of these. */
+ bfd_arch_m68k, /* Motorola 68xxx */
+#define bfd_mach_m68000 1
+#define bfd_mach_m68008 2
+#define bfd_mach_m68010 3
+#define bfd_mach_m68020 4
+#define bfd_mach_m68030 5
+#define bfd_mach_m68040 6
+#define bfd_mach_m68060 7
+#define bfd_mach_cpu32 8
+#define bfd_mach_fido 9
+#define bfd_mach_mcf_isa_a_nodiv 10
+#define bfd_mach_mcf_isa_a 11
+#define bfd_mach_mcf_isa_a_mac 12
+#define bfd_mach_mcf_isa_a_emac 13
+#define bfd_mach_mcf_isa_aplus 14
+#define bfd_mach_mcf_isa_aplus_mac 15
+#define bfd_mach_mcf_isa_aplus_emac 16
+#define bfd_mach_mcf_isa_b_nousp 17
+#define bfd_mach_mcf_isa_b_nousp_mac 18
+#define bfd_mach_mcf_isa_b_nousp_emac 19
+#define bfd_mach_mcf_isa_b 20
+#define bfd_mach_mcf_isa_b_mac 21
+#define bfd_mach_mcf_isa_b_emac 22
+#define bfd_mach_mcf_isa_b_float 23
+#define bfd_mach_mcf_isa_b_float_mac 24
+#define bfd_mach_mcf_isa_b_float_emac 25
+#define bfd_mach_mcf_isa_c 26
+#define bfd_mach_mcf_isa_c_mac 27
+#define bfd_mach_mcf_isa_c_emac 28
+#define bfd_mach_mcf_isa_c_nodiv 29
+#define bfd_mach_mcf_isa_c_nodiv_mac 30
+#define bfd_mach_mcf_isa_c_nodiv_emac 31
+ bfd_arch_vax, /* DEC Vax */
+ bfd_arch_i960, /* Intel 960 */
+ /* The order of the following is important:
+ a lower number indicates a machine type that
+ only accepts a subset of the instructions
+ available to machines with higher numbers.
+ The exception is the "ca", which is
+ incompatible with all other machines except
+ "core". */
+
+#define bfd_mach_i960_core 1
+#define bfd_mach_i960_ka_sa 2
+#define bfd_mach_i960_kb_sb 3
+#define bfd_mach_i960_mc 4
+#define bfd_mach_i960_xa 5
+#define bfd_mach_i960_ca 6
+#define bfd_mach_i960_jx 7
+#define bfd_mach_i960_hx 8
+
+ bfd_arch_or32, /* OpenRISC 32 */
+
+ bfd_arch_sparc, /* SPARC */
+#define bfd_mach_sparc 1
+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */
+#define bfd_mach_sparc_sparclet 2
+#define bfd_mach_sparc_sparclite 3
+#define bfd_mach_sparc_v8plus 4
+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_sparclite_le 6
+#define bfd_mach_sparc_v9 7
+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */
+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */
+/* Nonzero if MACH has the v9 instruction set. */
+#define bfd_mach_sparc_v9_p(mach) \
+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \
+ && (mach) != bfd_mach_sparc_sparclite_le)
+/* Nonzero if MACH is a 64 bit sparc architecture. */
+#define bfd_mach_sparc_64bit_p(mach) \
+ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb)
+ bfd_arch_spu, /* PowerPC SPU */
+#define bfd_mach_spu 256
+ bfd_arch_mips, /* MIPS Rxxxx */
+#define bfd_mach_mips3000 3000
+#define bfd_mach_mips3900 3900
+#define bfd_mach_mips4000 4000
+#define bfd_mach_mips4010 4010
+#define bfd_mach_mips4100 4100
+#define bfd_mach_mips4111 4111
+#define bfd_mach_mips4120 4120
+#define bfd_mach_mips4300 4300
+#define bfd_mach_mips4400 4400
+#define bfd_mach_mips4600 4600
+#define bfd_mach_mips4650 4650
+#define bfd_mach_mips5000 5000
+#define bfd_mach_mips5400 5400
+#define bfd_mach_mips5500 5500
+#define bfd_mach_mips6000 6000
+#define bfd_mach_mips7000 7000
+#define bfd_mach_mips8000 8000
+#define bfd_mach_mips9000 9000
+#define bfd_mach_mips10000 10000
+#define bfd_mach_mips12000 12000
+#define bfd_mach_mips14000 14000
+#define bfd_mach_mips16000 16000
+#define bfd_mach_mips16 16
+#define bfd_mach_mips5 5
+#define bfd_mach_mips_loongson_2e 3001
+#define bfd_mach_mips_loongson_2f 3002
+#define bfd_mach_mips_loongson_3a 3003
+#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */
+#define bfd_mach_mips_octeon 6501
+#define bfd_mach_mips_octeonp 6601
+#define bfd_mach_mips_octeon2 6502
+#define bfd_mach_mips_xlr 887682 /* decimal 'XLR' */
+#define bfd_mach_mipsisa32 32
+#define bfd_mach_mipsisa32r2 33
+#define bfd_mach_mipsisa64 64
+#define bfd_mach_mipsisa64r2 65
+#define bfd_mach_mips_micromips 96
+ bfd_arch_i386, /* Intel 386 */
+#define bfd_mach_i386_intel_syntax (1 << 0)
+#define bfd_mach_i386_i8086 (1 << 1)
+#define bfd_mach_i386_i386 (1 << 2)
+#define bfd_mach_x86_64 (1 << 3)
+#define bfd_mach_x64_32 (1 << 4)
+#define bfd_mach_i386_i386_intel_syntax (bfd_mach_i386_i386 | bfd_mach_i386_intel_syntax)
+#define bfd_mach_x86_64_intel_syntax (bfd_mach_x86_64 | bfd_mach_i386_intel_syntax)
+#define bfd_mach_x64_32_intel_syntax (bfd_mach_x64_32 | bfd_mach_i386_intel_syntax)
+ bfd_arch_l1om, /* Intel L1OM */
+#define bfd_mach_l1om (1 << 5)
+#define bfd_mach_l1om_intel_syntax (bfd_mach_l1om | bfd_mach_i386_intel_syntax)
+ bfd_arch_k1om, /* Intel K1OM */
+#define bfd_mach_k1om (1 << 6)
+#define bfd_mach_k1om_intel_syntax (bfd_mach_k1om | bfd_mach_i386_intel_syntax)
+ bfd_arch_we32k, /* AT&T WE32xxx */
+ bfd_arch_tahoe, /* CCI/Harris Tahoe */
+ bfd_arch_i860, /* Intel 860 */
+ bfd_arch_i370, /* IBM 360/370 Mainframes */
+ bfd_arch_romp, /* IBM ROMP PC/RT */
+ bfd_arch_convex, /* Convex */
+ bfd_arch_m88k, /* Motorola 88xxx */
+ bfd_arch_m98k, /* Motorola 98xxx */
+ bfd_arch_pyramid, /* Pyramid Technology */
+ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */
+#define bfd_mach_h8300 1
+#define bfd_mach_h8300h 2
+#define bfd_mach_h8300s 3
+#define bfd_mach_h8300hn 4
+#define bfd_mach_h8300sn 5
+#define bfd_mach_h8300sx 6
+#define bfd_mach_h8300sxn 7
+ bfd_arch_pdp11, /* DEC PDP-11 */
+ bfd_arch_plugin,
+ bfd_arch_powerpc, /* PowerPC */
+#define bfd_mach_ppc 32
+#define bfd_mach_ppc64 64
+#define bfd_mach_ppc_403 403
+#define bfd_mach_ppc_403gc 4030
+#define bfd_mach_ppc_405 405
+#define bfd_mach_ppc_505 505
+#define bfd_mach_ppc_601 601
+#define bfd_mach_ppc_602 602
+#define bfd_mach_ppc_603 603
+#define bfd_mach_ppc_ec603e 6031
+#define bfd_mach_ppc_604 604
+#define bfd_mach_ppc_620 620
+#define bfd_mach_ppc_630 630
+#define bfd_mach_ppc_750 750
+#define bfd_mach_ppc_860 860
+#define bfd_mach_ppc_a35 35
+#define bfd_mach_ppc_rs64ii 642
+#define bfd_mach_ppc_rs64iii 643
+#define bfd_mach_ppc_7400 7400
+#define bfd_mach_ppc_e500 500
+#define bfd_mach_ppc_e500mc 5001
+#define bfd_mach_ppc_e500mc64 5005
+#define bfd_mach_ppc_e5500 5006
+#define bfd_mach_ppc_e6500 5007
+#define bfd_mach_ppc_titan 83
+#define bfd_mach_ppc_vle 84
+ bfd_arch_rs6000, /* IBM RS/6000 */
+#define bfd_mach_rs6k 6000
+#define bfd_mach_rs6k_rs1 6001
+#define bfd_mach_rs6k_rsc 6003
+#define bfd_mach_rs6k_rs2 6002
+ bfd_arch_hppa, /* HP PA RISC */
+#define bfd_mach_hppa10 10
+#define bfd_mach_hppa11 11
+#define bfd_mach_hppa20 20
+#define bfd_mach_hppa20w 25
+ bfd_arch_d10v, /* Mitsubishi D10V */
+#define bfd_mach_d10v 1
+#define bfd_mach_d10v_ts2 2
+#define bfd_mach_d10v_ts3 3
+ bfd_arch_d30v, /* Mitsubishi D30V */
+ bfd_arch_dlx, /* DLX */
+ bfd_arch_m68hc11, /* Motorola 68HC11 */
+ bfd_arch_m68hc12, /* Motorola 68HC12 */
+#define bfd_mach_m6812_default 0
+#define bfd_mach_m6812 1
+#define bfd_mach_m6812s 2
+ bfd_arch_m9s12x, /* Freescale S12X */
+ bfd_arch_m9s12xg, /* Freescale XGATE */
+ bfd_arch_z8k, /* Zilog Z8000 */
+#define bfd_mach_z8001 1
+#define bfd_mach_z8002 2
+ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */
+ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */
+#define bfd_mach_sh 1
+#define bfd_mach_sh2 0x20
+#define bfd_mach_sh_dsp 0x2d
+#define bfd_mach_sh2a 0x2a
+#define bfd_mach_sh2a_nofpu 0x2b
+#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1
+#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2
+#define bfd_mach_sh2a_or_sh4 0x2a3
+#define bfd_mach_sh2a_or_sh3e 0x2a4
+#define bfd_mach_sh2e 0x2e
+#define bfd_mach_sh3 0x30
+#define bfd_mach_sh3_nommu 0x31
+#define bfd_mach_sh3_dsp 0x3d
+#define bfd_mach_sh3e 0x3e
+#define bfd_mach_sh4 0x40
+#define bfd_mach_sh4_nofpu 0x41
+#define bfd_mach_sh4_nommu_nofpu 0x42
+#define bfd_mach_sh4a 0x4a
+#define bfd_mach_sh4a_nofpu 0x4b
+#define bfd_mach_sh4al_dsp 0x4d
+#define bfd_mach_sh5 0x50
+ bfd_arch_alpha, /* Dec Alpha */
+#define bfd_mach_alpha_ev4 0x10
+#define bfd_mach_alpha_ev5 0x20
+#define bfd_mach_alpha_ev6 0x30
+ bfd_arch_arm, /* Advanced RISC Machines ARM. */
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+#define bfd_mach_arm_iWMMXt2 13
+ bfd_arch_ns32k, /* National Semiconductor ns32000 */
+ bfd_arch_w65, /* WDC 65816 */
+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */
+ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */
+#define bfd_mach_tic3x 30
+#define bfd_mach_tic4x 40
+ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */
+ bfd_arch_tic6x, /* Texas Instruments TMS320C6X */
+ bfd_arch_tic80, /* TI TMS320c80 (MVP) */
+ bfd_arch_v850, /* NEC V850 */
+#define bfd_mach_v850 1
+#define bfd_mach_v850e 'E'
+#define bfd_mach_v850e1 '1'
+#define bfd_mach_v850e2 0x4532
+#define bfd_mach_v850e2v3 0x45325633
+ bfd_arch_arc, /* ARC Cores */
+#define bfd_mach_arc_5 5
+#define bfd_mach_arc_6 6
+#define bfd_mach_arc_7 7
+#define bfd_mach_arc_8 8
+ bfd_arch_m32c, /* Renesas M16C/M32C. */
+#define bfd_mach_m16c 0x75
+#define bfd_mach_m32c 0x78
+ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */
+#define bfd_mach_m32r 1 /* For backwards compatibility. */
+#define bfd_mach_m32rx 'x'
+#define bfd_mach_m32r2 '2'
+ bfd_arch_mn10200, /* Matsushita MN10200 */
+ bfd_arch_mn10300, /* Matsushita MN10300 */
+#define bfd_mach_mn10300 300
+#define bfd_mach_am33 330
+#define bfd_mach_am33_2 332
+ bfd_arch_fr30,
+#define bfd_mach_fr30 0x46523330
+ bfd_arch_frv,
+#define bfd_mach_frv 1
+#define bfd_mach_frvsimple 2
+#define bfd_mach_fr300 300
+#define bfd_mach_fr400 400
+#define bfd_mach_fr450 450
+#define bfd_mach_frvtomcat 499 /* fr500 prototype */
+#define bfd_mach_fr500 500
+#define bfd_mach_fr550 550
+ bfd_arch_moxie, /* The moxie processor */
+#define bfd_mach_moxie 1
+ bfd_arch_mcore,
+ bfd_arch_mep,
+#define bfd_mach_mep 1
+#define bfd_mach_mep_h1 0x6831
+#define bfd_mach_mep_c5 0x6335
+ bfd_arch_ia64, /* HP/Intel ia64 */
+#define bfd_mach_ia64_elf64 64
+#define bfd_mach_ia64_elf32 32
+ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */
+#define bfd_mach_ip2022 1
+#define bfd_mach_ip2022ext 2
+ bfd_arch_iq2000, /* Vitesse IQ2000. */
+#define bfd_mach_iq2000 1
+#define bfd_mach_iq10 2
+ bfd_arch_epiphany, /* Adapteva EPIPHANY */
+#define bfd_mach_epiphany16 1
+#define bfd_mach_epiphany32 2
+ bfd_arch_mt,
+#define bfd_mach_ms1 1
+#define bfd_mach_mrisc2 2
+#define bfd_mach_ms2 3
+ bfd_arch_pj,
+ bfd_arch_avr, /* Atmel AVR microcontrollers. */
+#define bfd_mach_avr1 1
+#define bfd_mach_avr2 2
+#define bfd_mach_avr25 25
+#define bfd_mach_avr3 3
+#define bfd_mach_avr31 31
+#define bfd_mach_avr35 35
+#define bfd_mach_avr4 4
+#define bfd_mach_avr5 5
+#define bfd_mach_avr51 51
+#define bfd_mach_avr6 6
+#define bfd_mach_avrxmega1 101
+#define bfd_mach_avrxmega2 102
+#define bfd_mach_avrxmega3 103
+#define bfd_mach_avrxmega4 104
+#define bfd_mach_avrxmega5 105
+#define bfd_mach_avrxmega6 106
+#define bfd_mach_avrxmega7 107
+ bfd_arch_bfin, /* ADI Blackfin */
+#define bfd_mach_bfin 1
+ bfd_arch_cr16, /* National Semiconductor CompactRISC (ie CR16). */
+#define bfd_mach_cr16 1
+ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */
+#define bfd_mach_cr16c 1
+ bfd_arch_crx, /* National Semiconductor CRX. */
+#define bfd_mach_crx 1
+ bfd_arch_cris, /* Axis CRIS */
+#define bfd_mach_cris_v0_v10 255
+#define bfd_mach_cris_v32 32
+#define bfd_mach_cris_v10_v32 1032
+ bfd_arch_rl78,
+#define bfd_mach_rl78 0x75
+ bfd_arch_rx, /* Renesas RX. */
+#define bfd_mach_rx 0x75
+ bfd_arch_s390, /* IBM s390 */
+#define bfd_mach_s390_31 31
+#define bfd_mach_s390_64 64
+ bfd_arch_score, /* Sunplus score */
+#define bfd_mach_score3 3
+#define bfd_mach_score7 7
+ bfd_arch_openrisc, /* OpenRISC */
+ bfd_arch_mmix, /* Donald Knuth's educational processor. */
+ bfd_arch_xstormy16,
+#define bfd_mach_xstormy16 1
+ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */
+#define bfd_mach_msp11 11
+#define bfd_mach_msp110 110
+#define bfd_mach_msp12 12
+#define bfd_mach_msp13 13
+#define bfd_mach_msp14 14
+#define bfd_mach_msp15 15
+#define bfd_mach_msp16 16
+#define bfd_mach_msp21 21
+#define bfd_mach_msp31 31
+#define bfd_mach_msp32 32
+#define bfd_mach_msp33 33
+#define bfd_mach_msp41 41
+#define bfd_mach_msp42 42
+#define bfd_mach_msp43 43
+#define bfd_mach_msp44 44
+ bfd_arch_xc16x, /* Infineon's XC16X Series. */
+#define bfd_mach_xc16x 1
+#define bfd_mach_xc16xl 2
+#define bfd_mach_xc16xs 3
+ bfd_arch_xgate, /* Freescale XGATE */
+#define bfd_mach_xgate 1
+ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */
+#define bfd_mach_xtensa 1
+ bfd_arch_z80,
+#define bfd_mach_z80strict 1 /* No undocumented opcodes. */
+#define bfd_mach_z80 3 /* With ixl, ixh, iyl, and iyh. */
+#define bfd_mach_z80full 7 /* All undocumented instructions. */
+#define bfd_mach_r800 11 /* R800: successor with multiplication. */
+ bfd_arch_lm32, /* Lattice Mico32 */
+#define bfd_mach_lm32 1
+ bfd_arch_microblaze,/* Xilinx MicroBlaze. */
+ bfd_arch_tilepro, /* Tilera TILEPro */
+ bfd_arch_tilegx, /* Tilera TILE-Gx */
+#define bfd_mach_tilepro 1
+#define bfd_mach_tilegx 1
+#define bfd_mach_tilegx32 2
+ bfd_arch_last
+ };
+
+typedef struct bfd_arch_info
+{
+ int bits_per_word;
+ int bits_per_address;
+ int bits_per_byte;
+ enum bfd_architecture arch;
+ unsigned long mach;
+ const char *arch_name;
+ const char *printable_name;
+ unsigned int section_align_power;
+ /* TRUE if this is the default machine for the architecture.
+ The default arch should be the first entry for an arch so that
+ all the entries for that arch can be accessed via <<next>>. */
+ bfd_boolean the_default;
+ const struct bfd_arch_info * (*compatible)
+ (const struct bfd_arch_info *a, const struct bfd_arch_info *b);
+
+ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *);
+
+ /* Allocate via bfd_malloc and return a fill buffer of size COUNT. If
+ IS_BIGENDIAN is TRUE, the order of bytes is big endian. If CODE is
+ TRUE, the buffer contains code. */
+ void *(*fill) (bfd_size_type count, bfd_boolean is_bigendian,
+ bfd_boolean code);
+
+ const struct bfd_arch_info *next;
+}
+bfd_arch_info_type;
+
+const char *bfd_printable_name (bfd *abfd);
+
+const bfd_arch_info_type *bfd_scan_arch (const char *string);
+
+const char **bfd_arch_list (void);
+
+const bfd_arch_info_type *bfd_arch_get_compatible
+ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns);
+
+void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg);
+
+enum bfd_architecture bfd_get_arch (bfd *abfd);
+
+unsigned long bfd_get_mach (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_address (bfd *abfd);
+
+const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd);
+
+const bfd_arch_info_type *bfd_lookup_arch
+ (enum bfd_architecture arch, unsigned long machine);
+
+const char *bfd_printable_arch_mach
+ (enum bfd_architecture arch, unsigned long machine);
+
+unsigned int bfd_octets_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_mach_octets_per_byte
+ (enum bfd_architecture arch, unsigned long machine);
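+
+#if 0 /* Illustrative sketch, not part of bfd.h: querying the
+ architecture table defined above. printf needs <stdio.h>. */
+ const bfd_arch_info_type *info
+ = bfd_lookup_arch (bfd_arch_arm, bfd_mach_arm_5TE);
+ if (info != NULL)
+ printf ("%s\n", info->printable_name);
+#endif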
+
+/* Extracted from reloc.c. */
+typedef enum bfd_reloc_status
+{
+ /* No errors detected. */
+ bfd_reloc_ok,
+
+ /* The relocation was performed, but there was an overflow. */
+ bfd_reloc_overflow,
+
+ /* The address to relocate was not within the section supplied. */
+ bfd_reloc_outofrange,
+
+ /* Used by special functions. */
+ bfd_reloc_continue,
+
+ /* Unsupported relocation size requested. */
+ bfd_reloc_notsupported,
+
+ /* Unused. */
+ bfd_reloc_other,
+
+ /* The symbol to relocate against was undefined. */
+ bfd_reloc_undefined,
+
+ /* The relocation was performed, but may not be ok - presently
+ generated only when linking i960 coff files with i960 b.out
+ symbols. If this type is returned, the error_message argument
+ to bfd_perform_relocation will be set. */
+ bfd_reloc_dangerous
+ }
+ bfd_reloc_status_type;
+
+
+typedef struct reloc_cache_entry
+{
+ /* A pointer into the canonical table of pointers. */
+ struct bfd_symbol **sym_ptr_ptr;
+
+ /* Offset in section. */
+ bfd_size_type address;
+
+ /* Addend for relocation value. */
+ bfd_vma addend;
+
+ /* Pointer to how to perform the required relocation. */
+ reloc_howto_type *howto;
+
+}
+arelent;
+
+enum complain_overflow
+{
+ /* Do not complain on overflow. */
+ complain_overflow_dont,
+
+ /* Complain if the value overflows when considered as a signed
+ number one bit larger than the field, i.e., a bitfield of N bits
+ is allowed to represent -2**n to 2**n-1. */
+ complain_overflow_bitfield,
+
+ /* Complain if the value overflows when considered as a signed
+ number. */
+ complain_overflow_signed,
+
+ /* Complain if the value overflows when considered as an
+ unsigned number. */
+ complain_overflow_unsigned
+};
+
+struct reloc_howto_struct
+{
+ /* The type field has mainly a documentary use - the back end can
+ do what it wants with it, though normally the back end's
+ external idea of what a reloc number is gets stored
+ in this field. For example, a PC relative word relocation
+ in a coff environment has the type 023 - because that's
+ what the outside world calls an R_PCRWORD reloc. */
+ unsigned int type;
+
+ /* The value the final relocation is shifted right by. This drops
+ unwanted data from the relocation. */
+ unsigned int rightshift;
+
+ /* The size of the item to be relocated. This is *not* a
+ power-of-two measure. To get the number of bytes operated
+ on by a type of relocation, use bfd_get_reloc_size. */
+ int size;
+
+ /* The number of bits in the item to be relocated. This is used
+ when doing overflow checking. */
+ unsigned int bitsize;
+
+ /* The relocation is relative to the field being relocated. */
+ bfd_boolean pc_relative;
+
+ /* The bit position of the reloc value in the destination.
+ The relocated value is left shifted by this amount. */
+ unsigned int bitpos;
+
+ /* What type of overflow error should be checked for when
+ relocating. */
+ enum complain_overflow complain_on_overflow;
+
+ /* If this field is non null, then the supplied function is
+ called rather than the normal function. This allows really
+ strange relocation methods to be accommodated (e.g., i960 callj
+ instructions). */
+ bfd_reloc_status_type (*special_function)
+ (bfd *, arelent *, struct bfd_symbol *, void *, asection *,
+ bfd *, char **);
+
+ /* The textual name of the relocation type. */
+ char *name;
+
+ /* Some formats record a relocation addend in the section contents
+ rather than with the relocation. For ELF formats this is the
+ distinction between USE_REL and USE_RELA (though the code checks
+ for USE_REL == 1/0). The value of this field is TRUE if the
+ addend is recorded with the section contents; when performing a
+ partial link (ld -r) the section contents (the data) will be
+ modified. The value of this field is FALSE if addends are
+ recorded with the relocation (in arelent.addend); when performing
+ a partial link the relocation will be modified.
+ All relocations for all ELF USE_RELA targets should set this field
+ to FALSE (values of TRUE should be looked on with suspicion).
+ However, the converse is not true: not all relocations of all ELF
+ USE_REL targets set this field to TRUE. Why this is so is peculiar
+ to each particular target. For relocs that aren't used in partial
+ links (e.g. GOT stuff) it doesn't matter what this is set to. */
+ bfd_boolean partial_inplace;
+
+ /* src_mask selects the part of the instruction (or data) to be used
+ in the relocation sum. If the target relocations don't have an
+ addend in the reloc, e.g., ELF USE_REL, src_mask will normally equal
+ dst_mask to extract the addend from the section contents. If
+ relocations do have an addend in the reloc, e.g., ELF USE_RELA, this
+ field should be zero. Non-zero values for ELF USE_RELA targets are
+ bogus as in those cases the value in the dst_mask part of the
+ section contents should be treated as garbage. */
+ bfd_vma src_mask;
+
+ /* dst_mask selects which parts of the instruction (or data) are
+ replaced with a relocated value. */
+ bfd_vma dst_mask;
+
+ /* When some formats create PC relative instructions, they leave
+ the value of the pc of the place being relocated in the offset
+ slot of the instruction, so that a PC relative relocation can
+ be made just by adding in an ordinary offset (e.g., sun3 a.out).
+ Some formats leave the displacement part of an instruction
+ empty (e.g., m88k bcs); this flag signals the fact. */
+ bfd_boolean pcrel_offset;
+};
+
+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \
+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC }
+#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \
+ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \
+ NAME, FALSE, 0, 0, IN)
+
+#define EMPTY_HOWTO(C) \
+ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \
+ NULL, FALSE, 0, 0, FALSE)
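+
+#if 0 /* Illustrative sketch, not part of bfd.h: one entry of a back
+ end's howto table, as the HOWTO macro is typically used. The reloc
+ number R_EXAMPLE_32 is hypothetical. */
+ static reloc_howto_type example_howto =
+ HOWTO (R_EXAMPLE_32, /* type */
+ 0, /* rightshift */
+ 2, /* size (2 selects 4 bytes) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ NULL, /* special_function */
+ "R_EXAMPLE_32", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE); /* pcrel_offset */
+#endif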
+
+#define HOWTO_PREPARE(relocation, symbol) \
+ { \
+ if (symbol != NULL) \
+ { \
+ if (bfd_is_com_section (symbol->section)) \
+ { \
+ relocation = 0; \
+ } \
+ else \
+ { \
+ relocation = symbol->value; \
+ } \
+ } \
+ }
+
+unsigned int bfd_get_reloc_size (reloc_howto_type *);
+
+typedef struct relent_chain
+{
+ arelent relent;
+ struct relent_chain *next;
+}
+arelent_chain;
+
+bfd_reloc_status_type bfd_check_overflow
+ (enum complain_overflow how,
+ unsigned int bitsize,
+ unsigned int rightshift,
+ unsigned int addrsize,
+ bfd_vma relocation);
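+
+#if 0 /* Illustrative sketch, not part of bfd.h: checking that a
+ computed value (here the assumed variable "relocation") fits a signed
+ 16-bit field before installing it. */
+ if (bfd_check_overflow (complain_overflow_signed, 16, 0,
+ bfd_arch_bits_per_address (abfd),
+ relocation) != bfd_reloc_ok)
+ return bfd_reloc_overflow;
+#endif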
+
+bfd_reloc_status_type bfd_perform_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data,
+ asection *input_section,
+ bfd *output_bfd,
+ char **error_message);
+
+bfd_reloc_status_type bfd_install_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data, bfd_vma data_start,
+ asection *input_section,
+ char **error_message);
+
+enum bfd_reloc_code_real {
+ _dummy_first_bfd_reloc_code_real,
+
+
+/* Basic absolute relocations of N bits. */
+ BFD_RELOC_64,
+ BFD_RELOC_32,
+ BFD_RELOC_26,
+ BFD_RELOC_24,
+ BFD_RELOC_16,
+ BFD_RELOC_14,
+ BFD_RELOC_8,
+
+/* PC-relative relocations. Sometimes these are relative to the address
+of the relocation itself; sometimes they are relative to the start of
+the section containing the relocation. It depends on the specific target.
+
+The 24-bit relocation is used in some Intel 960 configurations. */
+ BFD_RELOC_64_PCREL,
+ BFD_RELOC_32_PCREL,
+ BFD_RELOC_24_PCREL,
+ BFD_RELOC_16_PCREL,
+ BFD_RELOC_12_PCREL,
+ BFD_RELOC_8_PCREL,
+
+/* Section relative relocations. Some targets need this for DWARF2. */
+ BFD_RELOC_32_SECREL,
+
+/* For ELF. */
+ BFD_RELOC_32_GOT_PCREL,
+ BFD_RELOC_16_GOT_PCREL,
+ BFD_RELOC_8_GOT_PCREL,
+ BFD_RELOC_32_GOTOFF,
+ BFD_RELOC_16_GOTOFF,
+ BFD_RELOC_LO16_GOTOFF,
+ BFD_RELOC_HI16_GOTOFF,
+ BFD_RELOC_HI16_S_GOTOFF,
+ BFD_RELOC_8_GOTOFF,
+ BFD_RELOC_64_PLT_PCREL,
+ BFD_RELOC_32_PLT_PCREL,
+ BFD_RELOC_24_PLT_PCREL,
+ BFD_RELOC_16_PLT_PCREL,
+ BFD_RELOC_8_PLT_PCREL,
+ BFD_RELOC_64_PLTOFF,
+ BFD_RELOC_32_PLTOFF,
+ BFD_RELOC_16_PLTOFF,
+ BFD_RELOC_LO16_PLTOFF,
+ BFD_RELOC_HI16_PLTOFF,
+ BFD_RELOC_HI16_S_PLTOFF,
+ BFD_RELOC_8_PLTOFF,
+
+/* Relocations used by 68K ELF. */
+ BFD_RELOC_68K_GLOB_DAT,
+ BFD_RELOC_68K_JMP_SLOT,
+ BFD_RELOC_68K_RELATIVE,
+ BFD_RELOC_68K_TLS_GD32,
+ BFD_RELOC_68K_TLS_GD16,
+ BFD_RELOC_68K_TLS_GD8,
+ BFD_RELOC_68K_TLS_LDM32,
+ BFD_RELOC_68K_TLS_LDM16,
+ BFD_RELOC_68K_TLS_LDM8,
+ BFD_RELOC_68K_TLS_LDO32,
+ BFD_RELOC_68K_TLS_LDO16,
+ BFD_RELOC_68K_TLS_LDO8,
+ BFD_RELOC_68K_TLS_IE32,
+ BFD_RELOC_68K_TLS_IE16,
+ BFD_RELOC_68K_TLS_IE8,
+ BFD_RELOC_68K_TLS_LE32,
+ BFD_RELOC_68K_TLS_LE16,
+ BFD_RELOC_68K_TLS_LE8,
+
+/* Linkage-table relative. */
+ BFD_RELOC_32_BASEREL,
+ BFD_RELOC_16_BASEREL,
+ BFD_RELOC_LO16_BASEREL,
+ BFD_RELOC_HI16_BASEREL,
+ BFD_RELOC_HI16_S_BASEREL,
+ BFD_RELOC_8_BASEREL,
+ BFD_RELOC_RVA,
+
+/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */
+ BFD_RELOC_8_FFnn,
+
+/* These PC-relative relocations are stored as word displacements --
+i.e., byte displacements shifted right two bits. The 30-bit word
+displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the
+SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The
+signed 16-bit displacement is used on the MIPS, and the 23-bit
+displacement is used on the Alpha. */
+ BFD_RELOC_32_PCREL_S2,
+ BFD_RELOC_16_PCREL_S2,
+ BFD_RELOC_23_PCREL_S2,
+
+/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of
+the target word. These are used on the SPARC. */
+ BFD_RELOC_HI22,
+ BFD_RELOC_LO10,
+
+/* For systems that allocate a Global Pointer register, these are
+displacements off that register. These relocation types are
+handled specially, because the value the register will have is
+decided relatively late. */
+ BFD_RELOC_GPREL16,
+ BFD_RELOC_GPREL32,
+
+/* Reloc types used for i960/b.out. */
+ BFD_RELOC_I960_CALLJ,
+
+/* SPARC ELF relocations. There is probably some overlap with other
+relocation types already defined. */
+ BFD_RELOC_NONE,
+ BFD_RELOC_SPARC_WDISP22,
+ BFD_RELOC_SPARC22,
+ BFD_RELOC_SPARC13,
+ BFD_RELOC_SPARC_GOT10,
+ BFD_RELOC_SPARC_GOT13,
+ BFD_RELOC_SPARC_GOT22,
+ BFD_RELOC_SPARC_PC10,
+ BFD_RELOC_SPARC_PC22,
+ BFD_RELOC_SPARC_WPLT30,
+ BFD_RELOC_SPARC_COPY,
+ BFD_RELOC_SPARC_GLOB_DAT,
+ BFD_RELOC_SPARC_JMP_SLOT,
+ BFD_RELOC_SPARC_RELATIVE,
+ BFD_RELOC_SPARC_UA16,
+ BFD_RELOC_SPARC_UA32,
+ BFD_RELOC_SPARC_UA64,
+ BFD_RELOC_SPARC_GOTDATA_HIX22,
+ BFD_RELOC_SPARC_GOTDATA_LOX10,
+ BFD_RELOC_SPARC_GOTDATA_OP_HIX22,
+ BFD_RELOC_SPARC_GOTDATA_OP_LOX10,
+ BFD_RELOC_SPARC_GOTDATA_OP,
+ BFD_RELOC_SPARC_JMP_IREL,
+ BFD_RELOC_SPARC_IRELATIVE,
+
+/* I think these are specific to SPARC a.out (e.g., Sun 4). */
+ BFD_RELOC_SPARC_BASE13,
+ BFD_RELOC_SPARC_BASE22,
+
+/* SPARC64 relocations */
+#define BFD_RELOC_SPARC_64 BFD_RELOC_64
+ BFD_RELOC_SPARC_10,
+ BFD_RELOC_SPARC_11,
+ BFD_RELOC_SPARC_OLO10,
+ BFD_RELOC_SPARC_HH22,
+ BFD_RELOC_SPARC_HM10,
+ BFD_RELOC_SPARC_LM22,
+ BFD_RELOC_SPARC_PC_HH22,
+ BFD_RELOC_SPARC_PC_HM10,
+ BFD_RELOC_SPARC_PC_LM22,
+ BFD_RELOC_SPARC_WDISP16,
+ BFD_RELOC_SPARC_WDISP19,
+ BFD_RELOC_SPARC_7,
+ BFD_RELOC_SPARC_6,
+ BFD_RELOC_SPARC_5,
+#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL
+ BFD_RELOC_SPARC_PLT32,
+ BFD_RELOC_SPARC_PLT64,
+ BFD_RELOC_SPARC_HIX22,
+ BFD_RELOC_SPARC_LOX10,
+ BFD_RELOC_SPARC_H44,
+ BFD_RELOC_SPARC_M44,
+ BFD_RELOC_SPARC_L44,
+ BFD_RELOC_SPARC_REGISTER,
+ BFD_RELOC_SPARC_H34,
+ BFD_RELOC_SPARC_SIZE32,
+ BFD_RELOC_SPARC_SIZE64,
+ BFD_RELOC_SPARC_WDISP10,
+
+/* SPARC little endian relocation */
+ BFD_RELOC_SPARC_REV32,
+
+/* SPARC TLS relocations */
+ BFD_RELOC_SPARC_TLS_GD_HI22,
+ BFD_RELOC_SPARC_TLS_GD_LO10,
+ BFD_RELOC_SPARC_TLS_GD_ADD,
+ BFD_RELOC_SPARC_TLS_GD_CALL,
+ BFD_RELOC_SPARC_TLS_LDM_HI22,
+ BFD_RELOC_SPARC_TLS_LDM_LO10,
+ BFD_RELOC_SPARC_TLS_LDM_ADD,
+ BFD_RELOC_SPARC_TLS_LDM_CALL,
+ BFD_RELOC_SPARC_TLS_LDO_HIX22,
+ BFD_RELOC_SPARC_TLS_LDO_LOX10,
+ BFD_RELOC_SPARC_TLS_LDO_ADD,
+ BFD_RELOC_SPARC_TLS_IE_HI22,
+ BFD_RELOC_SPARC_TLS_IE_LO10,
+ BFD_RELOC_SPARC_TLS_IE_LD,
+ BFD_RELOC_SPARC_TLS_IE_LDX,
+ BFD_RELOC_SPARC_TLS_IE_ADD,
+ BFD_RELOC_SPARC_TLS_LE_HIX22,
+ BFD_RELOC_SPARC_TLS_LE_LOX10,
+ BFD_RELOC_SPARC_TLS_DTPMOD32,
+ BFD_RELOC_SPARC_TLS_DTPMOD64,
+ BFD_RELOC_SPARC_TLS_DTPOFF32,
+ BFD_RELOC_SPARC_TLS_DTPOFF64,
+ BFD_RELOC_SPARC_TLS_TPOFF32,
+ BFD_RELOC_SPARC_TLS_TPOFF64,
+
+/* SPU Relocations. */
+ BFD_RELOC_SPU_IMM7,
+ BFD_RELOC_SPU_IMM8,
+ BFD_RELOC_SPU_IMM10,
+ BFD_RELOC_SPU_IMM10W,
+ BFD_RELOC_SPU_IMM16,
+ BFD_RELOC_SPU_IMM16W,
+ BFD_RELOC_SPU_IMM18,
+ BFD_RELOC_SPU_PCREL9a,
+ BFD_RELOC_SPU_PCREL9b,
+ BFD_RELOC_SPU_PCREL16,
+ BFD_RELOC_SPU_LO16,
+ BFD_RELOC_SPU_HI16,
+ BFD_RELOC_SPU_PPU32,
+ BFD_RELOC_SPU_PPU64,
+ BFD_RELOC_SPU_ADD_PIC,
+
+/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or
+"addend" in some special way.
+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
+writing; when reading, it will be the absolute section symbol. The
+addend is the displacement in bytes of the "lda" instruction from
+the "ldah" instruction (which is at the address of this reloc). */
+ BFD_RELOC_ALPHA_GPDISP_HI16,
+
+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
+with GPDISP_HI16 relocs. The addend is ignored when writing the
+relocations out, and is filled in with the file's GP value on
+reading, for convenience. */
+ BFD_RELOC_ALPHA_GPDISP_LO16,
+
+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
+relocation except that there is no accompanying GPDISP_LO16
+relocation. */
+ BFD_RELOC_ALPHA_GPDISP,
+
+/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference;
+the assembler turns it into a LDQ instruction to load the address of
+the symbol, and then fills in a register in the real instruction.
+
+The LITERAL reloc, at the LDQ instruction, refers to the .lita
+section symbol. The addend is ignored when writing, but is filled
+in with the file's GP value on reading, for convenience, as with the
+GPDISP_LO16 reloc.
+
+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
+It should refer to the symbol to be referenced, as with 16_GOTOFF,
+but it generates output not based on the position within the .got
+section, but relative to the GP value chosen for the file during the
+final link stage.
+
+The LITUSE reloc, on the instruction using the loaded address, gives
+information to the linker that it might be able to use to optimize
+away some literal section references. The symbol is ignored (read
+as the absolute section symbol), and the "addend" indicates the type
+of instruction using the register:
+1 - "memory" fmt insn
+2 - byte-manipulation (byte offset reg)
+3 - jsr (target of branch) */
+ BFD_RELOC_ALPHA_LITERAL,
+ BFD_RELOC_ALPHA_ELF_LITERAL,
+ BFD_RELOC_ALPHA_LITUSE,
+
+/* The HINT relocation indicates a value that should be filled into the
+"hint" field of a jmp/jsr/ret instruction, for possible branch-
+prediction logic which may be provided on some processors. */
+ BFD_RELOC_ALPHA_HINT,
+
+/* The LINKAGE relocation outputs a linkage pair in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_LINKAGE,
+
+/* The CODEADDR relocation outputs a STO_CA in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_CODEADDR,
+
+/* The GPREL_HI/LO relocations together form a 32-bit offset from the
+GP register. */
+ BFD_RELOC_ALPHA_GPREL_HI16,
+ BFD_RELOC_ALPHA_GPREL_LO16,
+
+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
+share a common GP, and the target address is adjusted for
+STO_ALPHA_STD_GPLOAD. */
+ BFD_RELOC_ALPHA_BRSGP,
+
+/* The NOP relocation outputs a NOP if the longword displacement
+between two procedure entry points is < 2^21. */
+ BFD_RELOC_ALPHA_NOP,
+
+/* The BSR relocation outputs a BSR if the longword displacement
+between two procedure entry points is < 2^21. */
+ BFD_RELOC_ALPHA_BSR,
+
+/* The LDA relocation outputs an LDA if the longword displacement
+between two procedure entry points is < 2^16. */
+ BFD_RELOC_ALPHA_LDA,
+
+/* The BOH relocation outputs a BSR if the longword displacement
+between two procedure entry points is < 2^21, or else a hint. */
+ BFD_RELOC_ALPHA_BOH,
+
+/* Alpha thread-local storage relocations. */
+ BFD_RELOC_ALPHA_TLSGD,
+ BFD_RELOC_ALPHA_TLSLDM,
+ BFD_RELOC_ALPHA_DTPMOD64,
+ BFD_RELOC_ALPHA_GOTDTPREL16,
+ BFD_RELOC_ALPHA_DTPREL64,
+ BFD_RELOC_ALPHA_DTPREL_HI16,
+ BFD_RELOC_ALPHA_DTPREL_LO16,
+ BFD_RELOC_ALPHA_DTPREL16,
+ BFD_RELOC_ALPHA_GOTTPREL16,
+ BFD_RELOC_ALPHA_TPREL64,
+ BFD_RELOC_ALPHA_TPREL_HI16,
+ BFD_RELOC_ALPHA_TPREL_LO16,
+ BFD_RELOC_ALPHA_TPREL16,
+
+/* The MIPS jump instruction. */
+ BFD_RELOC_MIPS_JMP,
+ BFD_RELOC_MICROMIPS_JMP,
+
+/* The MIPS16 jump instruction. */
+ BFD_RELOC_MIPS16_JMP,
+
+/* MIPS16 GP relative reloc. */
+ BFD_RELOC_MIPS16_GPREL,
+
+/* High 16 bits of 32-bit value; simple reloc. */
+ BFD_RELOC_HI16,
+
+/* High 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_HI16_S,
+
+/* Low 16 bits. */
+ BFD_RELOC_LO16,
+
+/* High 16 bits of 32-bit pc-relative value */
+ BFD_RELOC_HI16_PCREL,
+
+/* High 16 bits of 32-bit pc-relative value, adjusted */
+ BFD_RELOC_HI16_S_PCREL,
+
+/* Low 16 bits of pc-relative value */
+ BFD_RELOC_LO16_PCREL,
+
+/* Equivalent of BFD_RELOC_MIPS_*, but with the MIPS16 layout of
+16-bit immediate fields */
+ BFD_RELOC_MIPS16_GOT16,
+ BFD_RELOC_MIPS16_CALL16,
+
+/* MIPS16 high 16 bits of 32-bit value. */
+ BFD_RELOC_MIPS16_HI16,
+
+/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_MIPS16_HI16_S,
+
+/* MIPS16 low 16 bits. */
+ BFD_RELOC_MIPS16_LO16,
+
+/* MIPS16 TLS relocations */
+ BFD_RELOC_MIPS16_TLS_GD,
+ BFD_RELOC_MIPS16_TLS_LDM,
+ BFD_RELOC_MIPS16_TLS_DTPREL_HI16,
+ BFD_RELOC_MIPS16_TLS_DTPREL_LO16,
+ BFD_RELOC_MIPS16_TLS_GOTTPREL,
+ BFD_RELOC_MIPS16_TLS_TPREL_HI16,
+ BFD_RELOC_MIPS16_TLS_TPREL_LO16,
+
+/* Relocation against a MIPS literal section. */
+ BFD_RELOC_MIPS_LITERAL,
+ BFD_RELOC_MICROMIPS_LITERAL,
+
+/* microMIPS PC-relative relocations. */
+ BFD_RELOC_MICROMIPS_7_PCREL_S1,
+ BFD_RELOC_MICROMIPS_10_PCREL_S1,
+ BFD_RELOC_MICROMIPS_16_PCREL_S1,
+
+/* microMIPS versions of generic BFD relocs. */
+ BFD_RELOC_MICROMIPS_GPREL16,
+ BFD_RELOC_MICROMIPS_HI16,
+ BFD_RELOC_MICROMIPS_HI16_S,
+ BFD_RELOC_MICROMIPS_LO16,
+
+/* MIPS ELF relocations. */
+ BFD_RELOC_MIPS_GOT16,
+ BFD_RELOC_MICROMIPS_GOT16,
+ BFD_RELOC_MIPS_CALL16,
+ BFD_RELOC_MICROMIPS_CALL16,
+ BFD_RELOC_MIPS_GOT_HI16,
+ BFD_RELOC_MICROMIPS_GOT_HI16,
+ BFD_RELOC_MIPS_GOT_LO16,
+ BFD_RELOC_MICROMIPS_GOT_LO16,
+ BFD_RELOC_MIPS_CALL_HI16,
+ BFD_RELOC_MICROMIPS_CALL_HI16,
+ BFD_RELOC_MIPS_CALL_LO16,
+ BFD_RELOC_MICROMIPS_CALL_LO16,
+ BFD_RELOC_MIPS_SUB,
+ BFD_RELOC_MICROMIPS_SUB,
+ BFD_RELOC_MIPS_GOT_PAGE,
+ BFD_RELOC_MICROMIPS_GOT_PAGE,
+ BFD_RELOC_MIPS_GOT_OFST,
+ BFD_RELOC_MICROMIPS_GOT_OFST,
+ BFD_RELOC_MIPS_GOT_DISP,
+ BFD_RELOC_MICROMIPS_GOT_DISP,
+ BFD_RELOC_MIPS_SHIFT5,
+ BFD_RELOC_MIPS_SHIFT6,
+ BFD_RELOC_MIPS_INSERT_A,
+ BFD_RELOC_MIPS_INSERT_B,
+ BFD_RELOC_MIPS_DELETE,
+ BFD_RELOC_MIPS_HIGHEST,
+ BFD_RELOC_MICROMIPS_HIGHEST,
+ BFD_RELOC_MIPS_HIGHER,
+ BFD_RELOC_MICROMIPS_HIGHER,
+ BFD_RELOC_MIPS_SCN_DISP,
+ BFD_RELOC_MICROMIPS_SCN_DISP,
+ BFD_RELOC_MIPS_REL16,
+ BFD_RELOC_MIPS_RELGOT,
+ BFD_RELOC_MIPS_JALR,
+ BFD_RELOC_MICROMIPS_JALR,
+ BFD_RELOC_MIPS_TLS_DTPMOD32,
+ BFD_RELOC_MIPS_TLS_DTPREL32,
+ BFD_RELOC_MIPS_TLS_DTPMOD64,
+ BFD_RELOC_MIPS_TLS_DTPREL64,
+ BFD_RELOC_MIPS_TLS_GD,
+ BFD_RELOC_MICROMIPS_TLS_GD,
+ BFD_RELOC_MIPS_TLS_LDM,
+ BFD_RELOC_MICROMIPS_TLS_LDM,
+ BFD_RELOC_MIPS_TLS_DTPREL_HI16,
+ BFD_RELOC_MICROMIPS_TLS_DTPREL_HI16,
+ BFD_RELOC_MIPS_TLS_DTPREL_LO16,
+ BFD_RELOC_MICROMIPS_TLS_DTPREL_LO16,
+ BFD_RELOC_MIPS_TLS_GOTTPREL,
+ BFD_RELOC_MICROMIPS_TLS_GOTTPREL,
+ BFD_RELOC_MIPS_TLS_TPREL32,
+ BFD_RELOC_MIPS_TLS_TPREL64,
+ BFD_RELOC_MIPS_TLS_TPREL_HI16,
+ BFD_RELOC_MICROMIPS_TLS_TPREL_HI16,
+ BFD_RELOC_MIPS_TLS_TPREL_LO16,
+ BFD_RELOC_MICROMIPS_TLS_TPREL_LO16,
+
+
+/* MIPS ELF relocations (VxWorks and PLT extensions). */
+ BFD_RELOC_MIPS_COPY,
+ BFD_RELOC_MIPS_JUMP_SLOT,
+
+
+/* Moxie ELF relocations. */
+ BFD_RELOC_MOXIE_10_PCREL,
+
+
+/* Fujitsu Frv Relocations. */
+ BFD_RELOC_FRV_LABEL16,
+ BFD_RELOC_FRV_LABEL24,
+ BFD_RELOC_FRV_LO16,
+ BFD_RELOC_FRV_HI16,
+ BFD_RELOC_FRV_GPREL12,
+ BFD_RELOC_FRV_GPRELU12,
+ BFD_RELOC_FRV_GPREL32,
+ BFD_RELOC_FRV_GPRELHI,
+ BFD_RELOC_FRV_GPRELLO,
+ BFD_RELOC_FRV_GOT12,
+ BFD_RELOC_FRV_GOTHI,
+ BFD_RELOC_FRV_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC,
+ BFD_RELOC_FRV_FUNCDESC_GOT12,
+ BFD_RELOC_FRV_FUNCDESC_GOTHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC_VALUE,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFF12,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO,
+ BFD_RELOC_FRV_GOTOFF12,
+ BFD_RELOC_FRV_GOTOFFHI,
+ BFD_RELOC_FRV_GOTOFFLO,
+ BFD_RELOC_FRV_GETTLSOFF,
+ BFD_RELOC_FRV_TLSDESC_VALUE,
+ BFD_RELOC_FRV_GOTTLSDESC12,
+ BFD_RELOC_FRV_GOTTLSDESCHI,
+ BFD_RELOC_FRV_GOTTLSDESCLO,
+ BFD_RELOC_FRV_TLSMOFF12,
+ BFD_RELOC_FRV_TLSMOFFHI,
+ BFD_RELOC_FRV_TLSMOFFLO,
+ BFD_RELOC_FRV_GOTTLSOFF12,
+ BFD_RELOC_FRV_GOTTLSOFFHI,
+ BFD_RELOC_FRV_GOTTLSOFFLO,
+ BFD_RELOC_FRV_TLSOFF,
+ BFD_RELOC_FRV_TLSDESC_RELAX,
+ BFD_RELOC_FRV_GETTLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSMOFF,
+
+
+/* This is a 24bit GOT-relative reloc for the mn10300. */
+ BFD_RELOC_MN10300_GOTOFF24,
+
+/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT32,
+
+/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT24,
+
+/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT16,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_MN10300_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_MN10300_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_MN10300_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_MN10300_RELATIVE,
+
+/* Together with another reloc targeted at the same location,
+allows for a value that is the difference of two symbols
+in the same section. */
+ BFD_RELOC_MN10300_SYM_DIFF,
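+
+/* For instance, an assembler expression such as ".long end - start",
+with both labels in the same relaxable section, may be emitted as a
+SYM_DIFF reloc against one symbol paired with a reloc against the
+other at the same offset, so the linker can keep the difference
+correct while relaxing. */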
+
+/* The addend of this reloc is an alignment power that must
+be honoured at the offset's location, regardless of linker
+relaxation. */
+ BFD_RELOC_MN10300_ALIGN,
+
+/* Various TLS-related relocations. */
+ BFD_RELOC_MN10300_TLS_GD,
+ BFD_RELOC_MN10300_TLS_LD,
+ BFD_RELOC_MN10300_TLS_LDO,
+ BFD_RELOC_MN10300_TLS_GOTIE,
+ BFD_RELOC_MN10300_TLS_IE,
+ BFD_RELOC_MN10300_TLS_LE,
+ BFD_RELOC_MN10300_TLS_DTPMOD,
+ BFD_RELOC_MN10300_TLS_DTPOFF,
+ BFD_RELOC_MN10300_TLS_TPOFF,
+
+/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_32_PCREL,
+
+/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_16_PCREL,
+
+
+/* i386/elf relocations */
+ BFD_RELOC_386_GOT32,
+ BFD_RELOC_386_PLT32,
+ BFD_RELOC_386_COPY,
+ BFD_RELOC_386_GLOB_DAT,
+ BFD_RELOC_386_JUMP_SLOT,
+ BFD_RELOC_386_RELATIVE,
+ BFD_RELOC_386_GOTOFF,
+ BFD_RELOC_386_GOTPC,
+ BFD_RELOC_386_TLS_TPOFF,
+ BFD_RELOC_386_TLS_IE,
+ BFD_RELOC_386_TLS_GOTIE,
+ BFD_RELOC_386_TLS_LE,
+ BFD_RELOC_386_TLS_GD,
+ BFD_RELOC_386_TLS_LDM,
+ BFD_RELOC_386_TLS_LDO_32,
+ BFD_RELOC_386_TLS_IE_32,
+ BFD_RELOC_386_TLS_LE_32,
+ BFD_RELOC_386_TLS_DTPMOD32,
+ BFD_RELOC_386_TLS_DTPOFF32,
+ BFD_RELOC_386_TLS_TPOFF32,
+ BFD_RELOC_386_TLS_GOTDESC,
+ BFD_RELOC_386_TLS_DESC_CALL,
+ BFD_RELOC_386_TLS_DESC,
+ BFD_RELOC_386_IRELATIVE,
+
+/* x86-64/elf relocations */
+ BFD_RELOC_X86_64_GOT32,
+ BFD_RELOC_X86_64_PLT32,
+ BFD_RELOC_X86_64_COPY,
+ BFD_RELOC_X86_64_GLOB_DAT,
+ BFD_RELOC_X86_64_JUMP_SLOT,
+ BFD_RELOC_X86_64_RELATIVE,
+ BFD_RELOC_X86_64_GOTPCREL,
+ BFD_RELOC_X86_64_32S,
+ BFD_RELOC_X86_64_DTPMOD64,
+ BFD_RELOC_X86_64_DTPOFF64,
+ BFD_RELOC_X86_64_TPOFF64,
+ BFD_RELOC_X86_64_TLSGD,
+ BFD_RELOC_X86_64_TLSLD,
+ BFD_RELOC_X86_64_DTPOFF32,
+ BFD_RELOC_X86_64_GOTTPOFF,
+ BFD_RELOC_X86_64_TPOFF32,
+ BFD_RELOC_X86_64_GOTOFF64,
+ BFD_RELOC_X86_64_GOTPC32,
+ BFD_RELOC_X86_64_GOT64,
+ BFD_RELOC_X86_64_GOTPCREL64,
+ BFD_RELOC_X86_64_GOTPC64,
+ BFD_RELOC_X86_64_GOTPLT64,
+ BFD_RELOC_X86_64_PLTOFF64,
+ BFD_RELOC_X86_64_GOTPC32_TLSDESC,
+ BFD_RELOC_X86_64_TLSDESC_CALL,
+ BFD_RELOC_X86_64_TLSDESC,
+ BFD_RELOC_X86_64_IRELATIVE,
+
+/* ns32k relocations */
+ BFD_RELOC_NS32K_IMM_8,
+ BFD_RELOC_NS32K_IMM_16,
+ BFD_RELOC_NS32K_IMM_32,
+ BFD_RELOC_NS32K_IMM_8_PCREL,
+ BFD_RELOC_NS32K_IMM_16_PCREL,
+ BFD_RELOC_NS32K_IMM_32_PCREL,
+ BFD_RELOC_NS32K_DISP_8,
+ BFD_RELOC_NS32K_DISP_16,
+ BFD_RELOC_NS32K_DISP_32,
+ BFD_RELOC_NS32K_DISP_8_PCREL,
+ BFD_RELOC_NS32K_DISP_16_PCREL,
+ BFD_RELOC_NS32K_DISP_32_PCREL,
+
+/* PDP11 relocations */
+ BFD_RELOC_PDP11_DISP_8_PCREL,
+ BFD_RELOC_PDP11_DISP_6_PCREL,
+
+/* Picojava relocs. Not all of these appear in object files. */
+ BFD_RELOC_PJ_CODE_HI16,
+ BFD_RELOC_PJ_CODE_LO16,
+ BFD_RELOC_PJ_CODE_DIR16,
+ BFD_RELOC_PJ_CODE_DIR32,
+ BFD_RELOC_PJ_CODE_REL16,
+ BFD_RELOC_PJ_CODE_REL32,
+
+/* Power(rs6000) and PowerPC relocations. */
+ BFD_RELOC_PPC_B26,
+ BFD_RELOC_PPC_BA26,
+ BFD_RELOC_PPC_TOC16,
+ BFD_RELOC_PPC_B16,
+ BFD_RELOC_PPC_B16_BRTAKEN,
+ BFD_RELOC_PPC_B16_BRNTAKEN,
+ BFD_RELOC_PPC_BA16,
+ BFD_RELOC_PPC_BA16_BRTAKEN,
+ BFD_RELOC_PPC_BA16_BRNTAKEN,
+ BFD_RELOC_PPC_COPY,
+ BFD_RELOC_PPC_GLOB_DAT,
+ BFD_RELOC_PPC_JMP_SLOT,
+ BFD_RELOC_PPC_RELATIVE,
+ BFD_RELOC_PPC_LOCAL24PC,
+ BFD_RELOC_PPC_EMB_NADDR32,
+ BFD_RELOC_PPC_EMB_NADDR16,
+ BFD_RELOC_PPC_EMB_NADDR16_LO,
+ BFD_RELOC_PPC_EMB_NADDR16_HI,
+ BFD_RELOC_PPC_EMB_NADDR16_HA,
+ BFD_RELOC_PPC_EMB_SDAI16,
+ BFD_RELOC_PPC_EMB_SDA2I16,
+ BFD_RELOC_PPC_EMB_SDA2REL,
+ BFD_RELOC_PPC_EMB_SDA21,
+ BFD_RELOC_PPC_EMB_MRKREF,
+ BFD_RELOC_PPC_EMB_RELSEC16,
+ BFD_RELOC_PPC_EMB_RELST_LO,
+ BFD_RELOC_PPC_EMB_RELST_HI,
+ BFD_RELOC_PPC_EMB_RELST_HA,
+ BFD_RELOC_PPC_EMB_BIT_FLD,
+ BFD_RELOC_PPC_EMB_RELSDA,
+ BFD_RELOC_PPC_VLE_REL8,
+ BFD_RELOC_PPC_VLE_REL15,
+ BFD_RELOC_PPC_VLE_REL24,
+ BFD_RELOC_PPC_VLE_LO16A,
+ BFD_RELOC_PPC_VLE_LO16D,
+ BFD_RELOC_PPC_VLE_HI16A,
+ BFD_RELOC_PPC_VLE_HI16D,
+ BFD_RELOC_PPC_VLE_HA16A,
+ BFD_RELOC_PPC_VLE_HA16D,
+ BFD_RELOC_PPC_VLE_SDA21,
+ BFD_RELOC_PPC_VLE_SDA21_LO,
+ BFD_RELOC_PPC_VLE_SDAREL_LO16A,
+ BFD_RELOC_PPC_VLE_SDAREL_LO16D,
+ BFD_RELOC_PPC_VLE_SDAREL_HI16A,
+ BFD_RELOC_PPC_VLE_SDAREL_HI16D,
+ BFD_RELOC_PPC_VLE_SDAREL_HA16A,
+ BFD_RELOC_PPC_VLE_SDAREL_HA16D,
+ BFD_RELOC_PPC64_HIGHER,
+ BFD_RELOC_PPC64_HIGHER_S,
+ BFD_RELOC_PPC64_HIGHEST,
+ BFD_RELOC_PPC64_HIGHEST_S,
+ BFD_RELOC_PPC64_TOC16_LO,
+ BFD_RELOC_PPC64_TOC16_HI,
+ BFD_RELOC_PPC64_TOC16_HA,
+ BFD_RELOC_PPC64_TOC,
+ BFD_RELOC_PPC64_PLTGOT16,
+ BFD_RELOC_PPC64_PLTGOT16_LO,
+ BFD_RELOC_PPC64_PLTGOT16_HI,
+ BFD_RELOC_PPC64_PLTGOT16_HA,
+ BFD_RELOC_PPC64_ADDR16_DS,
+ BFD_RELOC_PPC64_ADDR16_LO_DS,
+ BFD_RELOC_PPC64_GOT16_DS,
+ BFD_RELOC_PPC64_GOT16_LO_DS,
+ BFD_RELOC_PPC64_PLT16_LO_DS,
+ BFD_RELOC_PPC64_SECTOFF_DS,
+ BFD_RELOC_PPC64_SECTOFF_LO_DS,
+ BFD_RELOC_PPC64_TOC16_DS,
+ BFD_RELOC_PPC64_TOC16_LO_DS,
+ BFD_RELOC_PPC64_PLTGOT16_DS,
+ BFD_RELOC_PPC64_PLTGOT16_LO_DS,
+
+/* PowerPC and PowerPC64 thread-local storage relocations. */
+ BFD_RELOC_PPC_TLS,
+ BFD_RELOC_PPC_TLSGD,
+ BFD_RELOC_PPC_TLSLD,
+ BFD_RELOC_PPC_DTPMOD,
+ BFD_RELOC_PPC_TPREL16,
+ BFD_RELOC_PPC_TPREL16_LO,
+ BFD_RELOC_PPC_TPREL16_HI,
+ BFD_RELOC_PPC_TPREL16_HA,
+ BFD_RELOC_PPC_TPREL,
+ BFD_RELOC_PPC_DTPREL16,
+ BFD_RELOC_PPC_DTPREL16_LO,
+ BFD_RELOC_PPC_DTPREL16_HI,
+ BFD_RELOC_PPC_DTPREL16_HA,
+ BFD_RELOC_PPC_DTPREL,
+ BFD_RELOC_PPC_GOT_TLSGD16,
+ BFD_RELOC_PPC_GOT_TLSGD16_LO,
+ BFD_RELOC_PPC_GOT_TLSGD16_HI,
+ BFD_RELOC_PPC_GOT_TLSGD16_HA,
+ BFD_RELOC_PPC_GOT_TLSLD16,
+ BFD_RELOC_PPC_GOT_TLSLD16_LO,
+ BFD_RELOC_PPC_GOT_TLSLD16_HI,
+ BFD_RELOC_PPC_GOT_TLSLD16_HA,
+ BFD_RELOC_PPC_GOT_TPREL16,
+ BFD_RELOC_PPC_GOT_TPREL16_LO,
+ BFD_RELOC_PPC_GOT_TPREL16_HI,
+ BFD_RELOC_PPC_GOT_TPREL16_HA,
+ BFD_RELOC_PPC_GOT_DTPREL16,
+ BFD_RELOC_PPC_GOT_DTPREL16_LO,
+ BFD_RELOC_PPC_GOT_DTPREL16_HI,
+ BFD_RELOC_PPC_GOT_DTPREL16_HA,
+ BFD_RELOC_PPC64_TPREL16_DS,
+ BFD_RELOC_PPC64_TPREL16_LO_DS,
+ BFD_RELOC_PPC64_TPREL16_HIGHER,
+ BFD_RELOC_PPC64_TPREL16_HIGHERA,
+ BFD_RELOC_PPC64_TPREL16_HIGHEST,
+ BFD_RELOC_PPC64_TPREL16_HIGHESTA,
+ BFD_RELOC_PPC64_DTPREL16_DS,
+ BFD_RELOC_PPC64_DTPREL16_LO_DS,
+ BFD_RELOC_PPC64_DTPREL16_HIGHER,
+ BFD_RELOC_PPC64_DTPREL16_HIGHERA,
+ BFD_RELOC_PPC64_DTPREL16_HIGHEST,
+ BFD_RELOC_PPC64_DTPREL16_HIGHESTA,
+
+/* IBM 370/390 relocations */
+ BFD_RELOC_I370_D12,
+
+/* The type of reloc used to build a constructor table - at the moment
+probably a 32 bit wide absolute relocation, but the target can choose.
+It generally does map to one of the other relocation types. */
+ BFD_RELOC_CTOR,
+
+/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. */
+ BFD_RELOC_ARM_PCREL_BRANCH,
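+
+/* Such a branch stores (target - pc) >> 2 in a 24-bit signed field,
+so e.g. a forward branch of 0x1000 bytes is encoded as 0x400.  (On ARM
+the pc value used includes the usual 8-byte fetch offset.) */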
+
+/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_ARM_PCREL_BLX,
+
+/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_THUMB_PCREL_BLX,
+
+/* ARM 26-bit pc-relative branch for an unconditional BL or BLX instruction. */
+ BFD_RELOC_ARM_PCREL_CALL,
+
+/* ARM 26-bit pc-relative branch for B or conditional BL instruction. */
+ BFD_RELOC_ARM_PCREL_JUMP,
+
+/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches.
+The lowest bit must be zero and is not stored in the instruction.
+Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an
+"nn" one smaller in all cases. Note further that BRANCH23
+corresponds to R_ARM_THM_CALL. */
+ BFD_RELOC_THUMB_PCREL_BRANCH7,
+ BFD_RELOC_THUMB_PCREL_BRANCH9,
+ BFD_RELOC_THUMB_PCREL_BRANCH12,
+ BFD_RELOC_THUMB_PCREL_BRANCH20,
+ BFD_RELOC_THUMB_PCREL_BRANCH23,
+ BFD_RELOC_THUMB_PCREL_BRANCH25,
+
+/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */
+ BFD_RELOC_ARM_OFFSET_IMM,
+
+/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */
+ BFD_RELOC_ARM_THUMB_OFFSET,
+
+/* PC-relative or absolute relocation depending on target. Used for
+entries in .init_array sections. */
+ BFD_RELOC_ARM_TARGET1,
+
+/* Read-only segment base relative address. */
+ BFD_RELOC_ARM_ROSEGREL32,
+
+/* Data segment base relative address. */
+ BFD_RELOC_ARM_SBREL32,
+
+/* This reloc is used for references to RTTI data from exception handling
+tables. The actual definition depends on the target. It may be a
+pc-relative or some form of GOT-indirect relocation. */
+ BFD_RELOC_ARM_TARGET2,
+
+/* 31-bit PC relative address. */
+ BFD_RELOC_ARM_PREL31,
+
+/* Low and High halfword relocations for MOVW and MOVT instructions. */
+ BFD_RELOC_ARM_MOVW,
+ BFD_RELOC_ARM_MOVT,
+ BFD_RELOC_ARM_MOVW_PCREL,
+ BFD_RELOC_ARM_MOVT_PCREL,
+ BFD_RELOC_ARM_THUMB_MOVW,
+ BFD_RELOC_ARM_THUMB_MOVT,
+ BFD_RELOC_ARM_THUMB_MOVW_PCREL,
+ BFD_RELOC_ARM_THUMB_MOVT_PCREL,
+
+/* Relocations for setting up GOTs and PLTs for shared libraries. */
+ BFD_RELOC_ARM_JUMP_SLOT,
+ BFD_RELOC_ARM_GLOB_DAT,
+ BFD_RELOC_ARM_GOT32,
+ BFD_RELOC_ARM_PLT32,
+ BFD_RELOC_ARM_RELATIVE,
+ BFD_RELOC_ARM_GOTOFF,
+ BFD_RELOC_ARM_GOTPC,
+ BFD_RELOC_ARM_GOT_PREL,
+
+/* ARM thread-local storage relocations. */
+ BFD_RELOC_ARM_TLS_GD32,
+ BFD_RELOC_ARM_TLS_LDO32,
+ BFD_RELOC_ARM_TLS_LDM32,
+ BFD_RELOC_ARM_TLS_DTPOFF32,
+ BFD_RELOC_ARM_TLS_DTPMOD32,
+ BFD_RELOC_ARM_TLS_TPOFF32,
+ BFD_RELOC_ARM_TLS_IE32,
+ BFD_RELOC_ARM_TLS_LE32,
+ BFD_RELOC_ARM_TLS_GOTDESC,
+ BFD_RELOC_ARM_TLS_CALL,
+ BFD_RELOC_ARM_THM_TLS_CALL,
+ BFD_RELOC_ARM_TLS_DESCSEQ,
+ BFD_RELOC_ARM_THM_TLS_DESCSEQ,
+ BFD_RELOC_ARM_TLS_DESC,
+
+/* ARM group relocations. */
+ BFD_RELOC_ARM_ALU_PC_G0_NC,
+ BFD_RELOC_ARM_ALU_PC_G0,
+ BFD_RELOC_ARM_ALU_PC_G1_NC,
+ BFD_RELOC_ARM_ALU_PC_G1,
+ BFD_RELOC_ARM_ALU_PC_G2,
+ BFD_RELOC_ARM_LDR_PC_G0,
+ BFD_RELOC_ARM_LDR_PC_G1,
+ BFD_RELOC_ARM_LDR_PC_G2,
+ BFD_RELOC_ARM_LDRS_PC_G0,
+ BFD_RELOC_ARM_LDRS_PC_G1,
+ BFD_RELOC_ARM_LDRS_PC_G2,
+ BFD_RELOC_ARM_LDC_PC_G0,
+ BFD_RELOC_ARM_LDC_PC_G1,
+ BFD_RELOC_ARM_LDC_PC_G2,
+ BFD_RELOC_ARM_ALU_SB_G0_NC,
+ BFD_RELOC_ARM_ALU_SB_G0,
+ BFD_RELOC_ARM_ALU_SB_G1_NC,
+ BFD_RELOC_ARM_ALU_SB_G1,
+ BFD_RELOC_ARM_ALU_SB_G2,
+ BFD_RELOC_ARM_LDR_SB_G0,
+ BFD_RELOC_ARM_LDR_SB_G1,
+ BFD_RELOC_ARM_LDR_SB_G2,
+ BFD_RELOC_ARM_LDRS_SB_G0,
+ BFD_RELOC_ARM_LDRS_SB_G1,
+ BFD_RELOC_ARM_LDRS_SB_G2,
+ BFD_RELOC_ARM_LDC_SB_G0,
+ BFD_RELOC_ARM_LDC_SB_G1,
+ BFD_RELOC_ARM_LDC_SB_G2,
+
+/* Annotation of BX instructions. */
+ BFD_RELOC_ARM_V4BX,
+
+/* ARM support for STT_GNU_IFUNC. */
+ BFD_RELOC_ARM_IRELATIVE,
+
+/* These relocs are only used within the ARM assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_ARM_IMMEDIATE,
+ BFD_RELOC_ARM_ADRL_IMMEDIATE,
+ BFD_RELOC_ARM_T32_IMMEDIATE,
+ BFD_RELOC_ARM_T32_ADD_IMM,
+ BFD_RELOC_ARM_T32_IMM12,
+ BFD_RELOC_ARM_T32_ADD_PC12,
+ BFD_RELOC_ARM_SHIFT_IMM,
+ BFD_RELOC_ARM_SMC,
+ BFD_RELOC_ARM_HVC,
+ BFD_RELOC_ARM_SWI,
+ BFD_RELOC_ARM_MULTI,
+ BFD_RELOC_ARM_CP_OFF_IMM,
+ BFD_RELOC_ARM_CP_OFF_IMM_S2,
+ BFD_RELOC_ARM_T32_CP_OFF_IMM,
+ BFD_RELOC_ARM_T32_CP_OFF_IMM_S2,
+ BFD_RELOC_ARM_ADR_IMM,
+ BFD_RELOC_ARM_LDR_IMM,
+ BFD_RELOC_ARM_LITERAL,
+ BFD_RELOC_ARM_IN_POOL,
+ BFD_RELOC_ARM_OFFSET_IMM8,
+ BFD_RELOC_ARM_T32_OFFSET_U8,
+ BFD_RELOC_ARM_T32_OFFSET_IMM,
+ BFD_RELOC_ARM_HWLITERAL,
+ BFD_RELOC_ARM_THUMB_ADD,
+ BFD_RELOC_ARM_THUMB_IMM,
+ BFD_RELOC_ARM_THUMB_SHIFT,
+
+/* Renesas / SuperH SH relocs. Not all of these appear in object files. */
+ BFD_RELOC_SH_PCDISP8BY2,
+ BFD_RELOC_SH_PCDISP12BY2,
+ BFD_RELOC_SH_IMM3,
+ BFD_RELOC_SH_IMM3U,
+ BFD_RELOC_SH_DISP12,
+ BFD_RELOC_SH_DISP12BY2,
+ BFD_RELOC_SH_DISP12BY4,
+ BFD_RELOC_SH_DISP12BY8,
+ BFD_RELOC_SH_DISP20,
+ BFD_RELOC_SH_DISP20BY8,
+ BFD_RELOC_SH_IMM4,
+ BFD_RELOC_SH_IMM4BY2,
+ BFD_RELOC_SH_IMM4BY4,
+ BFD_RELOC_SH_IMM8,
+ BFD_RELOC_SH_IMM8BY2,
+ BFD_RELOC_SH_IMM8BY4,
+ BFD_RELOC_SH_PCRELIMM8BY2,
+ BFD_RELOC_SH_PCRELIMM8BY4,
+ BFD_RELOC_SH_SWITCH16,
+ BFD_RELOC_SH_SWITCH32,
+ BFD_RELOC_SH_USES,
+ BFD_RELOC_SH_COUNT,
+ BFD_RELOC_SH_ALIGN,
+ BFD_RELOC_SH_CODE,
+ BFD_RELOC_SH_DATA,
+ BFD_RELOC_SH_LABEL,
+ BFD_RELOC_SH_LOOP_START,
+ BFD_RELOC_SH_LOOP_END,
+ BFD_RELOC_SH_COPY,
+ BFD_RELOC_SH_GLOB_DAT,
+ BFD_RELOC_SH_JMP_SLOT,
+ BFD_RELOC_SH_RELATIVE,
+ BFD_RELOC_SH_GOTPC,
+ BFD_RELOC_SH_GOT_LOW16,
+ BFD_RELOC_SH_GOT_MEDLOW16,
+ BFD_RELOC_SH_GOT_MEDHI16,
+ BFD_RELOC_SH_GOT_HI16,
+ BFD_RELOC_SH_GOTPLT_LOW16,
+ BFD_RELOC_SH_GOTPLT_MEDLOW16,
+ BFD_RELOC_SH_GOTPLT_MEDHI16,
+ BFD_RELOC_SH_GOTPLT_HI16,
+ BFD_RELOC_SH_PLT_LOW16,
+ BFD_RELOC_SH_PLT_MEDLOW16,
+ BFD_RELOC_SH_PLT_MEDHI16,
+ BFD_RELOC_SH_PLT_HI16,
+ BFD_RELOC_SH_GOTOFF_LOW16,
+ BFD_RELOC_SH_GOTOFF_MEDLOW16,
+ BFD_RELOC_SH_GOTOFF_MEDHI16,
+ BFD_RELOC_SH_GOTOFF_HI16,
+ BFD_RELOC_SH_GOTPC_LOW16,
+ BFD_RELOC_SH_GOTPC_MEDLOW16,
+ BFD_RELOC_SH_GOTPC_MEDHI16,
+ BFD_RELOC_SH_GOTPC_HI16,
+ BFD_RELOC_SH_COPY64,
+ BFD_RELOC_SH_GLOB_DAT64,
+ BFD_RELOC_SH_JMP_SLOT64,
+ BFD_RELOC_SH_RELATIVE64,
+ BFD_RELOC_SH_GOT10BY4,
+ BFD_RELOC_SH_GOT10BY8,
+ BFD_RELOC_SH_GOTPLT10BY4,
+ BFD_RELOC_SH_GOTPLT10BY8,
+ BFD_RELOC_SH_GOTPLT32,
+ BFD_RELOC_SH_SHMEDIA_CODE,
+ BFD_RELOC_SH_IMMU5,
+ BFD_RELOC_SH_IMMS6,
+ BFD_RELOC_SH_IMMS6BY32,
+ BFD_RELOC_SH_IMMU6,
+ BFD_RELOC_SH_IMMS10,
+ BFD_RELOC_SH_IMMS10BY2,
+ BFD_RELOC_SH_IMMS10BY4,
+ BFD_RELOC_SH_IMMS10BY8,
+ BFD_RELOC_SH_IMMS16,
+ BFD_RELOC_SH_IMMU16,
+ BFD_RELOC_SH_IMM_LOW16,
+ BFD_RELOC_SH_IMM_LOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDLOW16,
+ BFD_RELOC_SH_IMM_MEDLOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDHI16,
+ BFD_RELOC_SH_IMM_MEDHI16_PCREL,
+ BFD_RELOC_SH_IMM_HI16,
+ BFD_RELOC_SH_IMM_HI16_PCREL,
+ BFD_RELOC_SH_PT_16,
+ BFD_RELOC_SH_TLS_GD_32,
+ BFD_RELOC_SH_TLS_LD_32,
+ BFD_RELOC_SH_TLS_LDO_32,
+ BFD_RELOC_SH_TLS_IE_32,
+ BFD_RELOC_SH_TLS_LE_32,
+ BFD_RELOC_SH_TLS_DTPMOD32,
+ BFD_RELOC_SH_TLS_DTPOFF32,
+ BFD_RELOC_SH_TLS_TPOFF32,
+ BFD_RELOC_SH_GOT20,
+ BFD_RELOC_SH_GOTOFF20,
+ BFD_RELOC_SH_GOTFUNCDESC,
+ BFD_RELOC_SH_GOTFUNCDESC20,
+ BFD_RELOC_SH_GOTOFFFUNCDESC,
+ BFD_RELOC_SH_GOTOFFFUNCDESC20,
+ BFD_RELOC_SH_FUNCDESC,
+
+/* ARC Cores relocs.
+ARC 22 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. The high 20 bits are installed in bits 26
+through 7 of the instruction. */
+ BFD_RELOC_ARC_B22_PCREL,
+
+/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not
+stored in the instruction. The high 24 bits are installed in bits 23
+through 0. */
+ BFD_RELOC_ARC_B26,
+
+/* ADI Blackfin 16 bit immediate absolute reloc. */
+ BFD_RELOC_BFIN_16_IMM,
+
+/* ADI Blackfin 16 bit immediate absolute reloc higher 16 bits. */
+ BFD_RELOC_BFIN_16_HIGH,
+
+/* ADI Blackfin 'a' part of LSETUP. */
+ BFD_RELOC_BFIN_4_PCREL,
+
+/* ADI Blackfin. */
+ BFD_RELOC_BFIN_5_PCREL,
+
+/* ADI Blackfin 16 bit immediate absolute reloc lower 16 bits. */
+ BFD_RELOC_BFIN_16_LOW,
+
+/* ADI Blackfin. */
+ BFD_RELOC_BFIN_10_PCREL,
+
+/* ADI Blackfin 'b' part of LSETUP. */
+ BFD_RELOC_BFIN_11_PCREL,
+
+/* ADI Blackfin. */
+ BFD_RELOC_BFIN_12_PCREL_JUMP,
+
+/* ADI Blackfin Short jump, pcrel. */
+ BFD_RELOC_BFIN_12_PCREL_JUMP_S,
+
+/* ADI Blackfin Call.x not implemented. */
+ BFD_RELOC_BFIN_24_PCREL_CALL_X,
+
+/* ADI Blackfin Long Jump pcrel. */
+ BFD_RELOC_BFIN_24_PCREL_JUMP_L,
+
+/* ADI Blackfin FD-PIC relocations. */
+ BFD_RELOC_BFIN_GOT17M4,
+ BFD_RELOC_BFIN_GOTHI,
+ BFD_RELOC_BFIN_GOTLO,
+ BFD_RELOC_BFIN_FUNCDESC,
+ BFD_RELOC_BFIN_FUNCDESC_GOT17M4,
+ BFD_RELOC_BFIN_FUNCDESC_GOTHI,
+ BFD_RELOC_BFIN_FUNCDESC_GOTLO,
+ BFD_RELOC_BFIN_FUNCDESC_VALUE,
+ BFD_RELOC_BFIN_FUNCDESC_GOTOFF17M4,
+ BFD_RELOC_BFIN_FUNCDESC_GOTOFFHI,
+ BFD_RELOC_BFIN_FUNCDESC_GOTOFFLO,
+ BFD_RELOC_BFIN_GOTOFF17M4,
+ BFD_RELOC_BFIN_GOTOFFHI,
+ BFD_RELOC_BFIN_GOTOFFLO,
+
+/* ADI Blackfin GOT relocation. */
+ BFD_RELOC_BFIN_GOT,
+
+/* ADI Blackfin PLTPC relocation. */
+ BFD_RELOC_BFIN_PLTPC,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_PUSH,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_CONST,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_ADD,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_SUB,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_MULT,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_DIV,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_MOD,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LSHIFT,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_RSHIFT,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_AND,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_OR,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_XOR,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LAND,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LOR,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LEN,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_NEG,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_COMP,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_PAGE,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_HWPAGE,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_ADDR,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_10_PCREL_R,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. This is the same as the previous reloc
+except it is in the left container, i.e.,
+shifted left 15 bits. */
+ BFD_RELOC_D10V_10_PCREL_L,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18_PCREL,
+
+/* Mitsubishi D30V relocs.
+This is a 6-bit absolute reloc. */
+ BFD_RELOC_D30V_6,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_9_PCREL,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_9_PCREL_R,
+
+/* This is a 12-bit absolute reloc with the
+right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15_PCREL,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_15_PCREL_R,
+
+/* This is an 18-bit absolute reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21_PCREL,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_21_PCREL_R,
+
+/* This is a 32-bit absolute reloc. */
+ BFD_RELOC_D30V_32,
+
+/* This is a 32-bit pc-relative reloc. */
+ BFD_RELOC_D30V_32_PCREL,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_HI16_S,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_LO16,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_JMP26,
+
+/* Renesas M16C/M32C Relocations. */
+ BFD_RELOC_M32C_HI8,
+ BFD_RELOC_M32C_RL_JUMP,
+ BFD_RELOC_M32C_RL_1ADDR,
+ BFD_RELOC_M32C_RL_2ADDR,
+
+/* Renesas M32R (formerly Mitsubishi M32R) relocs.
+This is a 24 bit absolute address. */
+ BFD_RELOC_M32R_24,
+
+/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_10_PCREL,
+
+/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_18_PCREL,
+
+/* This is a 26-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_26_PCREL,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as unsigned. */
+ BFD_RELOC_M32R_HI16_ULO,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as signed. */
+ BFD_RELOC_M32R_HI16_SLO,
+
+/* This is a 16-bit reloc containing the lower 16 bits of an address. */
+ BFD_RELOC_M32R_LO16,
+
+/* This is a 16-bit reloc containing the small data area offset for use in
+add3, load, and store instructions. */
+ BFD_RELOC_M32R_SDA16,
+
+/* For PIC. */
+ BFD_RELOC_M32R_GOT24,
+ BFD_RELOC_M32R_26_PLTREL,
+ BFD_RELOC_M32R_COPY,
+ BFD_RELOC_M32R_GLOB_DAT,
+ BFD_RELOC_M32R_JMP_SLOT,
+ BFD_RELOC_M32R_RELATIVE,
+ BFD_RELOC_M32R_GOTOFF,
+ BFD_RELOC_M32R_GOTOFF_HI_ULO,
+ BFD_RELOC_M32R_GOTOFF_HI_SLO,
+ BFD_RELOC_M32R_GOTOFF_LO,
+ BFD_RELOC_M32R_GOTPC24,
+ BFD_RELOC_M32R_GOT16_HI_ULO,
+ BFD_RELOC_M32R_GOT16_HI_SLO,
+ BFD_RELOC_M32R_GOT16_LO,
+ BFD_RELOC_M32R_GOTPC_HI_ULO,
+ BFD_RELOC_M32R_GOTPC_HI_SLO,
+ BFD_RELOC_M32R_GOTPC_LO,
+
+/* This is a 9-bit reloc. */
+ BFD_RELOC_V850_9_PCREL,
+
+/* This is a 22-bit reloc. */
+ BFD_RELOC_V850_22_PCREL,
+
+/* This is a 16 bit offset from the short data area pointer. */
+ BFD_RELOC_V850_SDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+short data area pointer. */
+ BFD_RELOC_V850_SDA_15_16_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer. */
+ BFD_RELOC_V850_ZDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+zero data area pointer. */
+ BFD_RELOC_V850_ZDA_15_16_OFFSET,
+
+/* This is an 8 bit offset (of which only 6 bits are used) from the
+tiny data area pointer. */
+ BFD_RELOC_V850_TDA_6_8_OFFSET,
+
+/* This is an 8bit offset (of which only 7 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_7_8_OFFSET,
+
+/* This is a 7 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_7_7_OFFSET,
+
+/* This is a 16 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_16_16_OFFSET,
+
+/* This is a 5 bit offset (of which only 4 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_4_5_OFFSET,
+
+/* This is a 4 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_4_4_OFFSET,
+
+/* This is a 16 bit offset from the short data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET,
+
+/* This is a 6 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_6_7_OFFSET,
+
+/* This is a 16 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_16_16_OFFSET,
+
+/* Used for relaxing indirect function calls. */
+ BFD_RELOC_V850_LONGCALL,
+
+/* Used for relaxing indirect jumps. */
+ BFD_RELOC_V850_LONGJUMP,
+
+/* Used to maintain alignment whilst relaxing. */
+ BFD_RELOC_V850_ALIGN,
+
+/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu
+instructions. */
+ BFD_RELOC_V850_LO16_SPLIT_OFFSET,
+
+/* This is a 16-bit reloc. */
+ BFD_RELOC_V850_16_PCREL,
+
+/* This is a 17-bit reloc. */
+ BFD_RELOC_V850_17_PCREL,
+
+/* This is a 23-bit reloc. */
+ BFD_RELOC_V850_23,
+
+/* This is a 32-bit reloc. */
+ BFD_RELOC_V850_32_PCREL,
+
+/* This is a 32-bit reloc. */
+ BFD_RELOC_V850_32_ABS,
+
+/* This is a 16-bit reloc. */
+ BFD_RELOC_V850_16_SPLIT_OFFSET,
+
+/* This is a 16-bit reloc. */
+ BFD_RELOC_V850_16_S1,
+
+/* Low 16 bits. 16 bit shifted by 1. */
+ BFD_RELOC_V850_LO16_S1,
+
+/* This is a 16 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_15_16_OFFSET,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_GOTPCREL,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_16_GOT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_GOT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_22_PLT_PCREL,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_PLT_PCREL,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_COPY,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_GLOB_DAT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_JMP_SLOT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_RELATIVE,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_16_GOTOFF,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_GOTOFF,
+
+/* Start of code. */
+ BFD_RELOC_V850_CODE,
+
+/* Start of data in text. */
+ BFD_RELOC_V850_DATA,
+
+/* This is an 8-bit DP reloc for the tms320c30, where the most
+significant 8 bits of a 24 bit word are placed into the least
+significant 8 bits of the opcode. */
+ BFD_RELOC_TIC30_LDP,
+
+/* This is a 7-bit reloc for the tms320c54x, where the least
+significant 7 bits of a 16 bit word are placed into the least
+significant 7 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTLS7,
+
+/* This is a 9-bit DP reloc for the tms320c54x, where the most
+significant 9 bits of a 16 bit word are placed into the least
+significant 9 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTMS9,
+
+/* This is an extended address 23-bit reloc for the tms320c54x. */
+ BFD_RELOC_TIC54X_23,
+
+/* This is a 16-bit reloc for the tms320c54x, where the least
+significant 16 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_16_OF_23,
+
+/* This is a reloc for the tms320c54x, where the most
+significant 7 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_MS7_OF_23,
+
+/* TMS320C6000 relocations. */
+ BFD_RELOC_C6000_PCR_S21,
+ BFD_RELOC_C6000_PCR_S12,
+ BFD_RELOC_C6000_PCR_S10,
+ BFD_RELOC_C6000_PCR_S7,
+ BFD_RELOC_C6000_ABS_S16,
+ BFD_RELOC_C6000_ABS_L16,
+ BFD_RELOC_C6000_ABS_H16,
+ BFD_RELOC_C6000_SBR_U15_B,
+ BFD_RELOC_C6000_SBR_U15_H,
+ BFD_RELOC_C6000_SBR_U15_W,
+ BFD_RELOC_C6000_SBR_S16,
+ BFD_RELOC_C6000_SBR_L16_B,
+ BFD_RELOC_C6000_SBR_L16_H,
+ BFD_RELOC_C6000_SBR_L16_W,
+ BFD_RELOC_C6000_SBR_H16_B,
+ BFD_RELOC_C6000_SBR_H16_H,
+ BFD_RELOC_C6000_SBR_H16_W,
+ BFD_RELOC_C6000_SBR_GOT_U15_W,
+ BFD_RELOC_C6000_SBR_GOT_L16_W,
+ BFD_RELOC_C6000_SBR_GOT_H16_W,
+ BFD_RELOC_C6000_DSBT_INDEX,
+ BFD_RELOC_C6000_PREL31,
+ BFD_RELOC_C6000_COPY,
+ BFD_RELOC_C6000_JUMP_SLOT,
+ BFD_RELOC_C6000_EHTYPE,
+ BFD_RELOC_C6000_PCR_H16,
+ BFD_RELOC_C6000_PCR_L16,
+ BFD_RELOC_C6000_ALIGN,
+ BFD_RELOC_C6000_FPHEAD,
+ BFD_RELOC_C6000_NOCMP,
+
+/* This is a 48 bit reloc for the FR30 that stores 32 bits. */
+ BFD_RELOC_FR30_48,
+
+/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into
+two sections. */
+ BFD_RELOC_FR30_20,
+
+/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in
+4 bits. */
+ BFD_RELOC_FR30_6_IN_4,
+
+/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset
+into 8 bits. */
+ BFD_RELOC_FR30_8_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset
+into 8 bits. */
+ BFD_RELOC_FR30_9_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset
+into 8 bits. */
+ BFD_RELOC_FR30_10_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative
+short offset into 8 bits. */
+ BFD_RELOC_FR30_9_PCREL,
+
+/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative
+short offset into 11 bits. */
+ BFD_RELOC_FR30_12_PCREL,
+
+/* Motorola Mcore relocations. */
+ BFD_RELOC_MCORE_PCREL_IMM8BY4,
+ BFD_RELOC_MCORE_PCREL_IMM11BY2,
+ BFD_RELOC_MCORE_PCREL_IMM4BY2,
+ BFD_RELOC_MCORE_PCREL_32,
+ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2,
+ BFD_RELOC_MCORE_RVA,
+
+/* Toshiba Media Processor Relocations. */
+ BFD_RELOC_MEP_8,
+ BFD_RELOC_MEP_16,
+ BFD_RELOC_MEP_32,
+ BFD_RELOC_MEP_PCREL8A2,
+ BFD_RELOC_MEP_PCREL12A2,
+ BFD_RELOC_MEP_PCREL17A2,
+ BFD_RELOC_MEP_PCREL24A2,
+ BFD_RELOC_MEP_PCABS24A2,
+ BFD_RELOC_MEP_LOW16,
+ BFD_RELOC_MEP_HI16U,
+ BFD_RELOC_MEP_HI16S,
+ BFD_RELOC_MEP_GPREL,
+ BFD_RELOC_MEP_TPREL,
+ BFD_RELOC_MEP_TPREL7,
+ BFD_RELOC_MEP_TPREL7A2,
+ BFD_RELOC_MEP_TPREL7A4,
+ BFD_RELOC_MEP_UIMM24,
+ BFD_RELOC_MEP_ADDR24A4,
+ BFD_RELOC_MEP_GNU_VTINHERIT,
+ BFD_RELOC_MEP_GNU_VTENTRY,
+
+
+/* These are relocations for the GETA instruction. */
+ BFD_RELOC_MMIX_GETA,
+ BFD_RELOC_MMIX_GETA_1,
+ BFD_RELOC_MMIX_GETA_2,
+ BFD_RELOC_MMIX_GETA_3,
+
+/* These are relocations for a conditional branch instruction. */
+ BFD_RELOC_MMIX_CBRANCH,
+ BFD_RELOC_MMIX_CBRANCH_J,
+ BFD_RELOC_MMIX_CBRANCH_1,
+ BFD_RELOC_MMIX_CBRANCH_2,
+ BFD_RELOC_MMIX_CBRANCH_3,
+
+/* These are relocations for the PUSHJ instruction. */
+ BFD_RELOC_MMIX_PUSHJ,
+ BFD_RELOC_MMIX_PUSHJ_1,
+ BFD_RELOC_MMIX_PUSHJ_2,
+ BFD_RELOC_MMIX_PUSHJ_3,
+ BFD_RELOC_MMIX_PUSHJ_STUBBABLE,
+
+/* These are relocations for the JMP instruction. */
+ BFD_RELOC_MMIX_JMP,
+ BFD_RELOC_MMIX_JMP_1,
+ BFD_RELOC_MMIX_JMP_2,
+ BFD_RELOC_MMIX_JMP_3,
+
+/* This is a relocation for a relative address as in a GETA instruction or
+a branch. */
+ BFD_RELOC_MMIX_ADDR19,
+
+/* This is a relocation for a relative address as in a JMP instruction. */
+ BFD_RELOC_MMIX_ADDR27,
+
+/* This is a relocation for an instruction field that may be a general
+register or a value 0..255. */
+ BFD_RELOC_MMIX_REG_OR_BYTE,
+
+/* This is a relocation for an instruction field that may be a general
+register. */
+ BFD_RELOC_MMIX_REG,
+
+/* This is a relocation for two instruction fields holding a register and
+an offset, the equivalent of the relocation. */
+ BFD_RELOC_MMIX_BASE_PLUS_OFFSET,
+
+/* This relocation is an assertion that the expression is not allocated as
+a global register. It does not modify contents. */
+ BFD_RELOC_MMIX_LOCAL,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative
+short offset into 7 bits. */
+ BFD_RELOC_AVR_7_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative
+short offset into 12 bits. */
+ BFD_RELOC_AVR_13_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually
+program memory address) into 16 bits. */
+ BFD_RELOC_AVR_16_PM,
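+
+/* The "_PM" relocs use word addresses: e.g. the byte address 0x1FFFE
+in program memory would be stored as the 16-bit word address 0xFFFF,
+i.e. value >> 1. */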
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
+of program memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
+of 32 bit value) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_MS8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually data memory address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of data memory address) into 8 bit immediate value of
+SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(most high 8 bit of program memory address) into 8 bit immediate value
+of LDI or SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value (msb
+of 32 bit value) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_MS8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value
+(command address) into 8 bit immediate value of LDI insn. If the address
+is beyond the 128k boundary, the linker inserts a jump stub for this reloc
+in the lower 128k. */
+ BFD_RELOC_AVR_LO8_LDI_GS,
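+
+/* Typically produced by an assembler operand such as
+"ldi r24, lo8(gs(func))", where gs() asks the linker to generate a
+jump stub if the target ends up above the 128k boundary ("func" is an
+illustrative name). */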
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of command address) into 8 bit immediate value of LDI insn. If the address
+is beyond the 128k boundary, the linker inserts a jump stub for this reloc
+below 128k. */
+ BFD_RELOC_AVR_HI8_LDI_GS,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
+of command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually command address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of 16 bit command address) into 8 bit immediate value
+of SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 6 bit of 22 bit command address) into 8 bit immediate
+value of SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM_NEG,
+
+/* This is a 32 bit reloc for the AVR that stores 23 bit value
+into 22 bits. */
+ BFD_RELOC_AVR_CALL,
+
+/* This is a 16 bit reloc for the AVR that stores all needed bits
+for absolute addressing with ldi, with overflow checked at link time. */
+ BFD_RELOC_AVR_LDI,
+
+/* This is a 6 bit reloc for the AVR that stores offset for ldd/std
+instructions. */
+ BFD_RELOC_AVR_6,
+
+/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw
+instructions. */
+ BFD_RELOC_AVR_6_ADIW,
+
+/* This is an 8 bit reloc for the AVR that stores bits 0..7 of a symbol
+in .byte lo8(symbol). */
+ BFD_RELOC_AVR_8_LO,
+
+/* This is an 8 bit reloc for the AVR that stores bits 8..15 of a symbol
+in .byte hi8(symbol). */
+ BFD_RELOC_AVR_8_HI,
+
+/* This is an 8 bit reloc for the AVR that stores bits 16..23 of a symbol
+in .byte hlo8(symbol). */
+ BFD_RELOC_AVR_8_HLO,
+
+/* Renesas RL78 Relocations. */
+ BFD_RELOC_RL78_NEG8,
+ BFD_RELOC_RL78_NEG16,
+ BFD_RELOC_RL78_NEG24,
+ BFD_RELOC_RL78_NEG32,
+ BFD_RELOC_RL78_16_OP,
+ BFD_RELOC_RL78_24_OP,
+ BFD_RELOC_RL78_32_OP,
+ BFD_RELOC_RL78_8U,
+ BFD_RELOC_RL78_16U,
+ BFD_RELOC_RL78_24U,
+ BFD_RELOC_RL78_DIR3U_PCREL,
+ BFD_RELOC_RL78_DIFF,
+ BFD_RELOC_RL78_GPRELB,
+ BFD_RELOC_RL78_GPRELW,
+ BFD_RELOC_RL78_GPRELL,
+ BFD_RELOC_RL78_SYM,
+ BFD_RELOC_RL78_OP_SUBTRACT,
+ BFD_RELOC_RL78_OP_NEG,
+ BFD_RELOC_RL78_OP_AND,
+ BFD_RELOC_RL78_OP_SHRA,
+ BFD_RELOC_RL78_ABS8,
+ BFD_RELOC_RL78_ABS16,
+ BFD_RELOC_RL78_ABS16_REV,
+ BFD_RELOC_RL78_ABS32,
+ BFD_RELOC_RL78_ABS32_REV,
+ BFD_RELOC_RL78_ABS16U,
+ BFD_RELOC_RL78_ABS16UW,
+ BFD_RELOC_RL78_ABS16UL,
+ BFD_RELOC_RL78_RELAX,
+ BFD_RELOC_RL78_HI16,
+ BFD_RELOC_RL78_HI8,
+ BFD_RELOC_RL78_LO16,
+
+/* Renesas RX Relocations. */
+ BFD_RELOC_RX_NEG8,
+ BFD_RELOC_RX_NEG16,
+ BFD_RELOC_RX_NEG24,
+ BFD_RELOC_RX_NEG32,
+ BFD_RELOC_RX_16_OP,
+ BFD_RELOC_RX_24_OP,
+ BFD_RELOC_RX_32_OP,
+ BFD_RELOC_RX_8U,
+ BFD_RELOC_RX_16U,
+ BFD_RELOC_RX_24U,
+ BFD_RELOC_RX_DIR3U_PCREL,
+ BFD_RELOC_RX_DIFF,
+ BFD_RELOC_RX_GPRELB,
+ BFD_RELOC_RX_GPRELW,
+ BFD_RELOC_RX_GPRELL,
+ BFD_RELOC_RX_SYM,
+ BFD_RELOC_RX_OP_SUBTRACT,
+ BFD_RELOC_RX_OP_NEG,
+ BFD_RELOC_RX_ABS8,
+ BFD_RELOC_RX_ABS16,
+ BFD_RELOC_RX_ABS16_REV,
+ BFD_RELOC_RX_ABS32,
+ BFD_RELOC_RX_ABS32_REV,
+ BFD_RELOC_RX_ABS16U,
+ BFD_RELOC_RX_ABS16UW,
+ BFD_RELOC_RX_ABS16UL,
+ BFD_RELOC_RX_RELAX,
+
+/* Direct 12 bit. */
+ BFD_RELOC_390_12,
+
+/* 12 bit GOT offset. */
+ BFD_RELOC_390_GOT12,
+
+/* 32 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT32,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_390_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_390_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_390_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_390_RELATIVE,
+
+/* 32 bit PC relative offset to GOT. */
+ BFD_RELOC_390_GOTPC,
+
+/* 16 bit GOT offset. */
+ BFD_RELOC_390_GOT16,
+
+/* PC relative 16 bit shifted by 1. */
+ BFD_RELOC_390_PC16DBL,
+
+/* 16 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT16DBL,
+
+/* PC relative 32 bit shifted by 1. */
+ BFD_RELOC_390_PC32DBL,
+
+/* 32 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT32DBL,
+
+/* 32 bit PC rel. GOT shifted by 1. */
+ BFD_RELOC_390_GOTPCDBL,
+
+/* 64 bit GOT offset. */
+ BFD_RELOC_390_GOT64,
+
+/* 64 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT64,
+
+/* 32 bit rel. offset to GOT entry. */
+ BFD_RELOC_390_GOTENT,
+
+/* 64 bit offset to GOT. */
+ BFD_RELOC_390_GOTOFF64,
+
+/* 12-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT12,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT16,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT32,
+
+/* 64-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT64,
+
+/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLTENT,
+
+/* 16-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF16,
+
+/* 32-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF32,
+
+/* 64-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF64,
+
+/* s390 tls relocations. */
+ BFD_RELOC_390_TLS_LOAD,
+ BFD_RELOC_390_TLS_GDCALL,
+ BFD_RELOC_390_TLS_LDCALL,
+ BFD_RELOC_390_TLS_GD32,
+ BFD_RELOC_390_TLS_GD64,
+ BFD_RELOC_390_TLS_GOTIE12,
+ BFD_RELOC_390_TLS_GOTIE32,
+ BFD_RELOC_390_TLS_GOTIE64,
+ BFD_RELOC_390_TLS_LDM32,
+ BFD_RELOC_390_TLS_LDM64,
+ BFD_RELOC_390_TLS_IE32,
+ BFD_RELOC_390_TLS_IE64,
+ BFD_RELOC_390_TLS_IEENT,
+ BFD_RELOC_390_TLS_LE32,
+ BFD_RELOC_390_TLS_LE64,
+ BFD_RELOC_390_TLS_LDO32,
+ BFD_RELOC_390_TLS_LDO64,
+ BFD_RELOC_390_TLS_DTPMOD,
+ BFD_RELOC_390_TLS_DTPOFF,
+ BFD_RELOC_390_TLS_TPOFF,
+
+/* Long displacement extension. */
+ BFD_RELOC_390_20,
+ BFD_RELOC_390_GOT20,
+ BFD_RELOC_390_GOTPLT20,
+ BFD_RELOC_390_TLS_GOTIE20,
+
+/* STT_GNU_IFUNC relocation. */
+ BFD_RELOC_390_IRELATIVE,
+
+/* Score relocations.
+Low 16 bits for load/store. */
+ BFD_RELOC_SCORE_GPREL15,
+
+/* This is a 24-bit reloc with the right 1 bit assumed to be 0. */
+ BFD_RELOC_SCORE_DUMMY2,
+ BFD_RELOC_SCORE_JMP,
+
+/* This is a 19-bit reloc with the right 1 bit assumed to be 0. */
+ BFD_RELOC_SCORE_BRANCH,
+
+/* This is a 32-bit reloc for 48-bit instructions. */
+ BFD_RELOC_SCORE_IMM30,
+
+/* This is a 32-bit reloc for 48-bit instructions. */
+ BFD_RELOC_SCORE_IMM32,
+
+/* This is an 11-bit reloc with the right 1 bit assumed to be 0. */
+ BFD_RELOC_SCORE16_JMP,
+
+/* This is an 8-bit reloc with the right 1 bit assumed to be 0. */
+ BFD_RELOC_SCORE16_BRANCH,
+
+/* This is a 9-bit reloc with the right 1 bit assumed to be 0. */
+ BFD_RELOC_SCORE_BCMP,
+
+/* Undocumented Score relocs */
+ BFD_RELOC_SCORE_GOT15,
+ BFD_RELOC_SCORE_GOT_LO16,
+ BFD_RELOC_SCORE_CALL15,
+ BFD_RELOC_SCORE_DUMMY_HI16,
+
+/* Scenix IP2K - 9-bit register number / data address */
+ BFD_RELOC_IP2K_FR9,
+
+/* Scenix IP2K - 4-bit register/data bank number */
+ BFD_RELOC_IP2K_BANK,
+
+/* Scenix IP2K - low 13 bits of instruction word address */
+ BFD_RELOC_IP2K_ADDR16CJP,
+
+/* Scenix IP2K - high 3 bits of instruction word address */
+ BFD_RELOC_IP2K_PAGE3,
+
+/* Scenix IP2K - ext/low/high 8 bits of data address */
+ BFD_RELOC_IP2K_LO8DATA,
+ BFD_RELOC_IP2K_HI8DATA,
+ BFD_RELOC_IP2K_EX8DATA,
+
+/* Scenix IP2K - low/high 8 bits of instruction word address */
+ BFD_RELOC_IP2K_LO8INSN,
+ BFD_RELOC_IP2K_HI8INSN,
+
+/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */
+ BFD_RELOC_IP2K_PC_SKIP,
+
+/* Scenix IP2K - 16 bit word address in text section. */
+ BFD_RELOC_IP2K_TEXT,
+
+/* Scenix IP2K - 7-bit sp or dp offset */
+ BFD_RELOC_IP2K_FR_OFFSET,
+
+/* Scenix VPE4K coprocessor - data/insn-space addressing */
+ BFD_RELOC_VPE4KMATH_DATA,
+ BFD_RELOC_VPE4KMATH_INSN,
+
+/* These two relocations are used by the linker to determine which of
+the entries in a C++ virtual function table are actually used. When
+the --gc-sections option is given, the linker will zero out the entries
+that are not used, so that the code for those functions need not be
+included in the output.
+
+VTABLE_INHERIT is a zero-space relocation used to describe to the
+linker the inheritance tree of a C++ virtual function table. The
+relocation's symbol should be the parent class' vtable, and the
+relocation should be located at the child vtable.
+
+VTABLE_ENTRY is a zero-space relocation that describes the use of a
+virtual function table entry. The reloc's symbol should refer to the
+table of the class mentioned in the code. Off of that base, an offset
+describes the entry that is being used. For Rela hosts, this offset
+is stored in the reloc's addend. For Rel hosts, we are forced to put
+this offset in the reloc's section offset. */
+ BFD_RELOC_VTABLE_INHERIT,
+ BFD_RELOC_VTABLE_ENTRY,
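+
+/* With GNU as, these are normally emitted through the .vtable_inherit
+and .vtable_entry directives, e.g.
+    .vtable_inherit derived_vtable, base_vtable
+    .vtable_entry   base_vtable, 16
+(the symbol names and offset here are illustrative). */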
+
+/* Intel IA64 Relocations. */
+ BFD_RELOC_IA64_IMM14,
+ BFD_RELOC_IA64_IMM22,
+ BFD_RELOC_IA64_IMM64,
+ BFD_RELOC_IA64_DIR32MSB,
+ BFD_RELOC_IA64_DIR32LSB,
+ BFD_RELOC_IA64_DIR64MSB,
+ BFD_RELOC_IA64_DIR64LSB,
+ BFD_RELOC_IA64_GPREL22,
+ BFD_RELOC_IA64_GPREL64I,
+ BFD_RELOC_IA64_GPREL32MSB,
+ BFD_RELOC_IA64_GPREL32LSB,
+ BFD_RELOC_IA64_GPREL64MSB,
+ BFD_RELOC_IA64_GPREL64LSB,
+ BFD_RELOC_IA64_LTOFF22,
+ BFD_RELOC_IA64_LTOFF64I,
+ BFD_RELOC_IA64_PLTOFF22,
+ BFD_RELOC_IA64_PLTOFF64I,
+ BFD_RELOC_IA64_PLTOFF64MSB,
+ BFD_RELOC_IA64_PLTOFF64LSB,
+ BFD_RELOC_IA64_FPTR64I,
+ BFD_RELOC_IA64_FPTR32MSB,
+ BFD_RELOC_IA64_FPTR32LSB,
+ BFD_RELOC_IA64_FPTR64MSB,
+ BFD_RELOC_IA64_FPTR64LSB,
+ BFD_RELOC_IA64_PCREL21B,
+ BFD_RELOC_IA64_PCREL21BI,
+ BFD_RELOC_IA64_PCREL21M,
+ BFD_RELOC_IA64_PCREL21F,
+ BFD_RELOC_IA64_PCREL22,
+ BFD_RELOC_IA64_PCREL60B,
+ BFD_RELOC_IA64_PCREL64I,
+ BFD_RELOC_IA64_PCREL32MSB,
+ BFD_RELOC_IA64_PCREL32LSB,
+ BFD_RELOC_IA64_PCREL64MSB,
+ BFD_RELOC_IA64_PCREL64LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR22,
+ BFD_RELOC_IA64_LTOFF_FPTR64I,
+ BFD_RELOC_IA64_LTOFF_FPTR32MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR32LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64LSB,
+ BFD_RELOC_IA64_SEGREL32MSB,
+ BFD_RELOC_IA64_SEGREL32LSB,
+ BFD_RELOC_IA64_SEGREL64MSB,
+ BFD_RELOC_IA64_SEGREL64LSB,
+ BFD_RELOC_IA64_SECREL32MSB,
+ BFD_RELOC_IA64_SECREL32LSB,
+ BFD_RELOC_IA64_SECREL64MSB,
+ BFD_RELOC_IA64_SECREL64LSB,
+ BFD_RELOC_IA64_REL32MSB,
+ BFD_RELOC_IA64_REL32LSB,
+ BFD_RELOC_IA64_REL64MSB,
+ BFD_RELOC_IA64_REL64LSB,
+ BFD_RELOC_IA64_LTV32MSB,
+ BFD_RELOC_IA64_LTV32LSB,
+ BFD_RELOC_IA64_LTV64MSB,
+ BFD_RELOC_IA64_LTV64LSB,
+ BFD_RELOC_IA64_IPLTMSB,
+ BFD_RELOC_IA64_IPLTLSB,
+ BFD_RELOC_IA64_COPY,
+ BFD_RELOC_IA64_LTOFF22X,
+ BFD_RELOC_IA64_LDXMOV,
+ BFD_RELOC_IA64_TPREL14,
+ BFD_RELOC_IA64_TPREL22,
+ BFD_RELOC_IA64_TPREL64I,
+ BFD_RELOC_IA64_TPREL64MSB,
+ BFD_RELOC_IA64_TPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_TPREL22,
+ BFD_RELOC_IA64_DTPMOD64MSB,
+ BFD_RELOC_IA64_DTPMOD64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPMOD22,
+ BFD_RELOC_IA64_DTPREL14,
+ BFD_RELOC_IA64_DTPREL22,
+ BFD_RELOC_IA64_DTPREL64I,
+ BFD_RELOC_IA64_DTPREL32MSB,
+ BFD_RELOC_IA64_DTPREL32LSB,
+ BFD_RELOC_IA64_DTPREL64MSB,
+ BFD_RELOC_IA64_DTPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPREL22,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit high part of an absolute address. */
+ BFD_RELOC_M68HC11_HI8,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit low part of an absolute address. */
+ BFD_RELOC_M68HC11_LO8,
+
+/* Motorola 68HC11 reloc.
+This is the 3 bit of a value. */
+ BFD_RELOC_M68HC11_3B,
+
+/* Motorola 68HC11 reloc.
+This reloc marks the beginning of a jump/call instruction.
+It is used for linker relaxation to correctly identify the beginning
+of an instruction and to change some branches to use the PC-relative
+addressing mode. */
+ BFD_RELOC_M68HC11_RL_JUMP,
+
+/* Motorola 68HC11 reloc.
+This reloc marks a group of several instructions generated by gcc,
+some of which the linker relaxation pass may modify or remove. */
+ BFD_RELOC_M68HC11_RL_GROUP,
+
+/* Motorola 68HC11 reloc.
+This is the 16-bit lower part of an address. It is used for 'call'
+instruction to specify the symbol address without any special
+transformation (due to memory bank window). */
+ BFD_RELOC_M68HC11_LO16,
+
+/* Motorola 68HC11 reloc.
+This is an 8-bit reloc that specifies the page number of an address.
+It is used by 'call' instruction to specify the page number of
+the symbol. */
+ BFD_RELOC_M68HC11_PAGE,
+
+/* Motorola 68HC11 reloc.
+This is a 24-bit reloc that represents the address with a 16-bit
+value and an 8-bit page number. The symbol address is transformed
+to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */
+ BFD_RELOC_M68HC11_24,
+
+/* Motorola 68HC12 reloc.
+This is the 5 bits of a value. */
+ BFD_RELOC_M68HC12_5B,
+
+/* Freescale XGATE reloc.
+This reloc marks the beginning of a bra/jal instruction. */
+ BFD_RELOC_XGATE_RL_JUMP,
+
+/* Freescale XGATE reloc.
+This reloc marks a group of several instructions generated by gcc,
+some of which the linker relaxation pass may modify or remove. */
+ BFD_RELOC_XGATE_RL_GROUP,
+
+/* Freescale XGATE reloc.
+This is the 16-bit lower part of an address. It is used for the '16-bit'
+instructions. */
+ BFD_RELOC_XGATE_LO16,
+
+/* Freescale XGATE reloc. */
+ BFD_RELOC_XGATE_GPAGE,
+
+/* Freescale XGATE reloc. */
+ BFD_RELOC_XGATE_24,
+
+/* Freescale XGATE reloc.
+This is a 9-bit pc-relative reloc. */
+ BFD_RELOC_XGATE_PCREL_9,
+
+/* Freescale XGATE reloc.
+This is a 10-bit pc-relative reloc. */
+ BFD_RELOC_XGATE_PCREL_10,
+
+/* Freescale XGATE reloc.
+This is the 16-bit lower part of an address. It is used for the '16-bit'
+instructions. */
+ BFD_RELOC_XGATE_IMM8_LO,
+
+/* Freescale XGATE reloc.
+This is the 16-bit higher part of an address. It is used for the '16-bit'
+instructions. */
+ BFD_RELOC_XGATE_IMM8_HI,
+
+/* Freescale XGATE reloc.
+This is a 3-bit pc-relative reloc. */
+ BFD_RELOC_XGATE_IMM3,
+
+/* Freescale XGATE reloc.
+This is a 4-bit pc-relative reloc. */
+ BFD_RELOC_XGATE_IMM4,
+
+/* Freescale XGATE reloc.
+This is a 5-bit pc-relative reloc. */
+ BFD_RELOC_XGATE_IMM5,
+
+/* Motorola 68HC12 reloc.
+This is the 9 bits of a value. */
+ BFD_RELOC_M68HC12_9B,
+
+/* Motorola 68HC12 reloc.
+This is the 16 bits of a value. */
+ BFD_RELOC_M68HC12_16B,
+
+/* Motorola 68HC12/XGATE reloc.
+This is a PCREL9 branch. */
+ BFD_RELOC_M68HC12_9_PCREL,
+
+/* Motorola 68HC12/XGATE reloc.
+This is a PCREL10 branch. */
+ BFD_RELOC_M68HC12_10_PCREL,
+
+/* Motorola 68HC12/XGATE reloc.
+This is the 8 bit low part of an absolute address and immediately precedes
+a matching HI8XG part. */
+ BFD_RELOC_M68HC12_LO8XG,
+
+/* Motorola 68HC12/XGATE reloc.
+This is the 8 bit high part of an absolute address and immediately follows
+a matching LO8XG part. */
+ BFD_RELOC_M68HC12_HI8XG,
+
+/* NS CR16C Relocations. */
+ BFD_RELOC_16C_NUM08,
+ BFD_RELOC_16C_NUM08_C,
+ BFD_RELOC_16C_NUM16,
+ BFD_RELOC_16C_NUM16_C,
+ BFD_RELOC_16C_NUM32,
+ BFD_RELOC_16C_NUM32_C,
+ BFD_RELOC_16C_DISP04,
+ BFD_RELOC_16C_DISP04_C,
+ BFD_RELOC_16C_DISP08,
+ BFD_RELOC_16C_DISP08_C,
+ BFD_RELOC_16C_DISP16,
+ BFD_RELOC_16C_DISP16_C,
+ BFD_RELOC_16C_DISP24,
+ BFD_RELOC_16C_DISP24_C,
+ BFD_RELOC_16C_DISP24a,
+ BFD_RELOC_16C_DISP24a_C,
+ BFD_RELOC_16C_REG04,
+ BFD_RELOC_16C_REG04_C,
+ BFD_RELOC_16C_REG04a,
+ BFD_RELOC_16C_REG04a_C,
+ BFD_RELOC_16C_REG14,
+ BFD_RELOC_16C_REG14_C,
+ BFD_RELOC_16C_REG16,
+ BFD_RELOC_16C_REG16_C,
+ BFD_RELOC_16C_REG20,
+ BFD_RELOC_16C_REG20_C,
+ BFD_RELOC_16C_ABS20,
+ BFD_RELOC_16C_ABS20_C,
+ BFD_RELOC_16C_ABS24,
+ BFD_RELOC_16C_ABS24_C,
+ BFD_RELOC_16C_IMM04,
+ BFD_RELOC_16C_IMM04_C,
+ BFD_RELOC_16C_IMM16,
+ BFD_RELOC_16C_IMM16_C,
+ BFD_RELOC_16C_IMM20,
+ BFD_RELOC_16C_IMM20_C,
+ BFD_RELOC_16C_IMM24,
+ BFD_RELOC_16C_IMM24_C,
+ BFD_RELOC_16C_IMM32,
+ BFD_RELOC_16C_IMM32_C,
+
+/* NS CR16 Relocations. */
+ BFD_RELOC_CR16_NUM8,
+ BFD_RELOC_CR16_NUM16,
+ BFD_RELOC_CR16_NUM32,
+ BFD_RELOC_CR16_NUM32a,
+ BFD_RELOC_CR16_REGREL0,
+ BFD_RELOC_CR16_REGREL4,
+ BFD_RELOC_CR16_REGREL4a,
+ BFD_RELOC_CR16_REGREL14,
+ BFD_RELOC_CR16_REGREL14a,
+ BFD_RELOC_CR16_REGREL16,
+ BFD_RELOC_CR16_REGREL20,
+ BFD_RELOC_CR16_REGREL20a,
+ BFD_RELOC_CR16_ABS20,
+ BFD_RELOC_CR16_ABS24,
+ BFD_RELOC_CR16_IMM4,
+ BFD_RELOC_CR16_IMM8,
+ BFD_RELOC_CR16_IMM16,
+ BFD_RELOC_CR16_IMM20,
+ BFD_RELOC_CR16_IMM24,
+ BFD_RELOC_CR16_IMM32,
+ BFD_RELOC_CR16_IMM32a,
+ BFD_RELOC_CR16_DISP4,
+ BFD_RELOC_CR16_DISP8,
+ BFD_RELOC_CR16_DISP16,
+ BFD_RELOC_CR16_DISP20,
+ BFD_RELOC_CR16_DISP24,
+ BFD_RELOC_CR16_DISP24a,
+ BFD_RELOC_CR16_SWITCH8,
+ BFD_RELOC_CR16_SWITCH16,
+ BFD_RELOC_CR16_SWITCH32,
+ BFD_RELOC_CR16_GOT_REGREL20,
+ BFD_RELOC_CR16_GOTC_REGREL20,
+ BFD_RELOC_CR16_GLOB_DAT,
+
+/* NS CRX Relocations. */
+ BFD_RELOC_CRX_REL4,
+ BFD_RELOC_CRX_REL8,
+ BFD_RELOC_CRX_REL8_CMP,
+ BFD_RELOC_CRX_REL16,
+ BFD_RELOC_CRX_REL24,
+ BFD_RELOC_CRX_REL32,
+ BFD_RELOC_CRX_REGREL12,
+ BFD_RELOC_CRX_REGREL22,
+ BFD_RELOC_CRX_REGREL28,
+ BFD_RELOC_CRX_REGREL32,
+ BFD_RELOC_CRX_ABS16,
+ BFD_RELOC_CRX_ABS32,
+ BFD_RELOC_CRX_NUM8,
+ BFD_RELOC_CRX_NUM16,
+ BFD_RELOC_CRX_NUM32,
+ BFD_RELOC_CRX_IMM16,
+ BFD_RELOC_CRX_IMM32,
+ BFD_RELOC_CRX_SWITCH8,
+ BFD_RELOC_CRX_SWITCH16,
+ BFD_RELOC_CRX_SWITCH32,
+
+/* These relocs are only used within the CRIS assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_CRIS_BDISP8,
+ BFD_RELOC_CRIS_UNSIGNED_5,
+ BFD_RELOC_CRIS_SIGNED_6,
+ BFD_RELOC_CRIS_UNSIGNED_6,
+ BFD_RELOC_CRIS_SIGNED_8,
+ BFD_RELOC_CRIS_UNSIGNED_8,
+ BFD_RELOC_CRIS_SIGNED_16,
+ BFD_RELOC_CRIS_UNSIGNED_16,
+ BFD_RELOC_CRIS_LAPCQ_OFFSET,
+ BFD_RELOC_CRIS_UNSIGNED_4,
+
+/* Relocs used in ELF shared libraries for CRIS. */
+ BFD_RELOC_CRIS_COPY,
+ BFD_RELOC_CRIS_GLOB_DAT,
+ BFD_RELOC_CRIS_JUMP_SLOT,
+ BFD_RELOC_CRIS_RELATIVE,
+
+/* 32-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_32_GOT,
+
+/* 16-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_16_GOT,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_32_GOTPLT,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_16_GOTPLT,
+
+/* 32-bit offset to symbol, relative to GOT. */
+ BFD_RELOC_CRIS_32_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to GOT. */
+ BFD_RELOC_CRIS_32_PLT_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to this relocation. */
+ BFD_RELOC_CRIS_32_PLT_PCREL,
+
+/* Relocs used in TLS code for CRIS. */
+ BFD_RELOC_CRIS_32_GOT_GD,
+ BFD_RELOC_CRIS_16_GOT_GD,
+ BFD_RELOC_CRIS_32_GD,
+ BFD_RELOC_CRIS_DTP,
+ BFD_RELOC_CRIS_32_DTPREL,
+ BFD_RELOC_CRIS_16_DTPREL,
+ BFD_RELOC_CRIS_32_GOT_TPREL,
+ BFD_RELOC_CRIS_16_GOT_TPREL,
+ BFD_RELOC_CRIS_32_TPREL,
+ BFD_RELOC_CRIS_16_TPREL,
+ BFD_RELOC_CRIS_DTPMOD,
+ BFD_RELOC_CRIS_32_IE,
+
+/* Intel i860 Relocations. */
+ BFD_RELOC_860_COPY,
+ BFD_RELOC_860_GLOB_DAT,
+ BFD_RELOC_860_JUMP_SLOT,
+ BFD_RELOC_860_RELATIVE,
+ BFD_RELOC_860_PC26,
+ BFD_RELOC_860_PLT26,
+ BFD_RELOC_860_PC16,
+ BFD_RELOC_860_LOW0,
+ BFD_RELOC_860_SPLIT0,
+ BFD_RELOC_860_LOW1,
+ BFD_RELOC_860_SPLIT1,
+ BFD_RELOC_860_LOW2,
+ BFD_RELOC_860_SPLIT2,
+ BFD_RELOC_860_LOW3,
+ BFD_RELOC_860_LOGOT0,
+ BFD_RELOC_860_SPGOT0,
+ BFD_RELOC_860_LOGOT1,
+ BFD_RELOC_860_SPGOT1,
+ BFD_RELOC_860_LOGOTOFF0,
+ BFD_RELOC_860_SPGOTOFF0,
+ BFD_RELOC_860_LOGOTOFF1,
+ BFD_RELOC_860_SPGOTOFF1,
+ BFD_RELOC_860_LOGOTOFF2,
+ BFD_RELOC_860_LOGOTOFF3,
+ BFD_RELOC_860_LOPC,
+ BFD_RELOC_860_HIGHADJ,
+ BFD_RELOC_860_HAGOT,
+ BFD_RELOC_860_HAGOTOFF,
+ BFD_RELOC_860_HAPC,
+ BFD_RELOC_860_HIGH,
+ BFD_RELOC_860_HIGOT,
+ BFD_RELOC_860_HIGOTOFF,
+
+/* OpenRISC Relocations. */
+ BFD_RELOC_OPENRISC_ABS_26,
+ BFD_RELOC_OPENRISC_REL_26,
+
+/* H8 elf Relocations. */
+ BFD_RELOC_H8_DIR16A8,
+ BFD_RELOC_H8_DIR16R8,
+ BFD_RELOC_H8_DIR24A8,
+ BFD_RELOC_H8_DIR24R8,
+ BFD_RELOC_H8_DIR32A16,
+
+/* Sony Xstormy16 Relocations. */
+ BFD_RELOC_XSTORMY16_REL_12,
+ BFD_RELOC_XSTORMY16_12,
+ BFD_RELOC_XSTORMY16_24,
+ BFD_RELOC_XSTORMY16_FPTR16,
+
+/* Self-describing complex relocations. */
+ BFD_RELOC_RELC,
+
+
+/* Infineon Relocations. */
+ BFD_RELOC_XC16X_PAG,
+ BFD_RELOC_XC16X_POF,
+ BFD_RELOC_XC16X_SEG,
+ BFD_RELOC_XC16X_SOF,
+
+/* Relocations used by VAX ELF. */
+ BFD_RELOC_VAX_GLOB_DAT,
+ BFD_RELOC_VAX_JMP_SLOT,
+ BFD_RELOC_VAX_RELATIVE,
+
+/* Morpho MT - 16 bit immediate relocation. */
+ BFD_RELOC_MT_PC16,
+
+/* Morpho MT - Hi 16 bits of an address. */
+ BFD_RELOC_MT_HI16,
+
+/* Morpho MT - Low 16 bits of an address. */
+ BFD_RELOC_MT_LO16,
+
+/* Morpho MT - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MT_GNU_VTINHERIT,
+
+/* Morpho MT - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MT_GNU_VTENTRY,
+
+/* Morpho MT - 8 bit immediate relocation. */
+ BFD_RELOC_MT_PCINSN8,
+
+/* msp430 specific relocation codes */
+ BFD_RELOC_MSP430_10_PCREL,
+ BFD_RELOC_MSP430_16_PCREL,
+ BFD_RELOC_MSP430_16,
+ BFD_RELOC_MSP430_16_PCREL_BYTE,
+ BFD_RELOC_MSP430_16_BYTE,
+ BFD_RELOC_MSP430_2X_PCREL,
+ BFD_RELOC_MSP430_RL_PCREL,
+
+/* IQ2000 Relocations. */
+ BFD_RELOC_IQ2000_OFFSET_16,
+ BFD_RELOC_IQ2000_OFFSET_21,
+ BFD_RELOC_IQ2000_UHI16,
+
+/* Special Xtensa relocation used only by PLT entries in ELF shared
+objects to indicate that the runtime linker should set the value
+to one of its own internal functions or data structures. */
+ BFD_RELOC_XTENSA_RTLD,
+
+/* Xtensa relocations for ELF shared objects. */
+ BFD_RELOC_XTENSA_GLOB_DAT,
+ BFD_RELOC_XTENSA_JMP_SLOT,
+ BFD_RELOC_XTENSA_RELATIVE,
+
+/* Xtensa relocation used in ELF object files for symbols that may require
+PLT entries. Otherwise, this is just a generic 32-bit relocation. */
+ BFD_RELOC_XTENSA_PLT,
+
+/* Xtensa relocations to mark the difference of two local symbols.
+These are only needed to support linker relaxation and can be ignored
+when not relaxing. The field is set to the value of the difference
+assuming no relaxation. The relocation encodes the position of the
+first symbol so the linker can determine whether to adjust the field
+value. */
+ BFD_RELOC_XTENSA_DIFF8,
+ BFD_RELOC_XTENSA_DIFF16,
+ BFD_RELOC_XTENSA_DIFF32,
+
+/* Generic Xtensa relocations for instruction operands. Only the slot
+number is encoded in the relocation. The relocation applies to the
+last PC-relative immediate operand, or if there are no PC-relative
+immediates, to the last immediate operand. */
+ BFD_RELOC_XTENSA_SLOT0_OP,
+ BFD_RELOC_XTENSA_SLOT1_OP,
+ BFD_RELOC_XTENSA_SLOT2_OP,
+ BFD_RELOC_XTENSA_SLOT3_OP,
+ BFD_RELOC_XTENSA_SLOT4_OP,
+ BFD_RELOC_XTENSA_SLOT5_OP,
+ BFD_RELOC_XTENSA_SLOT6_OP,
+ BFD_RELOC_XTENSA_SLOT7_OP,
+ BFD_RELOC_XTENSA_SLOT8_OP,
+ BFD_RELOC_XTENSA_SLOT9_OP,
+ BFD_RELOC_XTENSA_SLOT10_OP,
+ BFD_RELOC_XTENSA_SLOT11_OP,
+ BFD_RELOC_XTENSA_SLOT12_OP,
+ BFD_RELOC_XTENSA_SLOT13_OP,
+ BFD_RELOC_XTENSA_SLOT14_OP,
+
+/* Alternate Xtensa relocations. Only the slot is encoded in the
+relocation. The meaning of these relocations is opcode-specific. */
+ BFD_RELOC_XTENSA_SLOT0_ALT,
+ BFD_RELOC_XTENSA_SLOT1_ALT,
+ BFD_RELOC_XTENSA_SLOT2_ALT,
+ BFD_RELOC_XTENSA_SLOT3_ALT,
+ BFD_RELOC_XTENSA_SLOT4_ALT,
+ BFD_RELOC_XTENSA_SLOT5_ALT,
+ BFD_RELOC_XTENSA_SLOT6_ALT,
+ BFD_RELOC_XTENSA_SLOT7_ALT,
+ BFD_RELOC_XTENSA_SLOT8_ALT,
+ BFD_RELOC_XTENSA_SLOT9_ALT,
+ BFD_RELOC_XTENSA_SLOT10_ALT,
+ BFD_RELOC_XTENSA_SLOT11_ALT,
+ BFD_RELOC_XTENSA_SLOT12_ALT,
+ BFD_RELOC_XTENSA_SLOT13_ALT,
+ BFD_RELOC_XTENSA_SLOT14_ALT,
+
+/* Xtensa relocations for backward compatibility. These have all been
+replaced by BFD_RELOC_XTENSA_SLOT0_OP. */
+ BFD_RELOC_XTENSA_OP0,
+ BFD_RELOC_XTENSA_OP1,
+ BFD_RELOC_XTENSA_OP2,
+
+/* Xtensa relocation to mark that the assembler expanded the
+instructions from an original target. The expansion size is
+encoded in the reloc size. */
+ BFD_RELOC_XTENSA_ASM_EXPAND,
+
+/* Xtensa relocation to mark that the linker should simplify
+assembler-expanded instructions. This is commonly used
+internally by the linker after analysis of a
+BFD_RELOC_XTENSA_ASM_EXPAND. */
+ BFD_RELOC_XTENSA_ASM_SIMPLIFY,
+
+/* Xtensa TLS relocations. */
+ BFD_RELOC_XTENSA_TLSDESC_FN,
+ BFD_RELOC_XTENSA_TLSDESC_ARG,
+ BFD_RELOC_XTENSA_TLS_DTPOFF,
+ BFD_RELOC_XTENSA_TLS_TPOFF,
+ BFD_RELOC_XTENSA_TLS_FUNC,
+ BFD_RELOC_XTENSA_TLS_ARG,
+ BFD_RELOC_XTENSA_TLS_CALL,
+
+/* 8 bit signed offset in (ix+d) or (iy+d). */
+ BFD_RELOC_Z80_DISP8,
+
+/* DJNZ offset. */
+ BFD_RELOC_Z8K_DISP7,
+
+/* CALR offset. */
+ BFD_RELOC_Z8K_CALLR,
+
+/* 4 bit value. */
+ BFD_RELOC_Z8K_IMM4L,
+
+/* Lattice Mico32 relocations. */
+ BFD_RELOC_LM32_CALL,
+ BFD_RELOC_LM32_BRANCH,
+ BFD_RELOC_LM32_16_GOT,
+ BFD_RELOC_LM32_GOTOFF_HI16,
+ BFD_RELOC_LM32_GOTOFF_LO16,
+ BFD_RELOC_LM32_COPY,
+ BFD_RELOC_LM32_GLOB_DAT,
+ BFD_RELOC_LM32_JMP_SLOT,
+ BFD_RELOC_LM32_RELATIVE,
+
+/* Difference between two section addresses. Must be followed by a
+BFD_RELOC_MACH_O_PAIR. */
+ BFD_RELOC_MACH_O_SECTDIFF,
+
+/* Like BFD_RELOC_MACH_O_SECTDIFF but with a local symbol. */
+ BFD_RELOC_MACH_O_LOCAL_SECTDIFF,
+
+/* Pair of relocations. Contains the first symbol. */
+ BFD_RELOC_MACH_O_PAIR,
+
+/* PCREL relocations. They are marked as branches so that a PLT entry
+can be created if required. */
+ BFD_RELOC_MACH_O_X86_64_BRANCH32,
+ BFD_RELOC_MACH_O_X86_64_BRANCH8,
+
+/* Used when referencing a GOT entry. */
+ BFD_RELOC_MACH_O_X86_64_GOT,
+
+/* Used when loading a GOT entry with movq. It is specially marked so that
+the linker can optimize the movq to a leaq if possible. */
+ BFD_RELOC_MACH_O_X86_64_GOT_LOAD,
+
+/* Symbol will be subtracted. Must be followed by a BFD_RELOC_64. */
+ BFD_RELOC_MACH_O_X86_64_SUBTRACTOR32,
+
+/* Symbol will be subtracted. Must be followed by a BFD_RELOC_64. */
+ BFD_RELOC_MACH_O_X86_64_SUBTRACTOR64,
+
+/* Same as BFD_RELOC_32_PCREL but with an implicit -1 addend. */
+ BFD_RELOC_MACH_O_X86_64_PCREL32_1,
+
+/* Same as BFD_RELOC_32_PCREL but with an implicit -2 addend. */
+ BFD_RELOC_MACH_O_X86_64_PCREL32_2,
+
+/* Same as BFD_RELOC_32_PCREL but with an implicit -4 addend. */
+ BFD_RELOC_MACH_O_X86_64_PCREL32_4,
+
+/* This is a 32 bit reloc for the microblaze that stores the
+low 16 bits of a value. */
+  BFD_RELOC_MICROBLAZE_32_LO,
+
+/* This is a 32 bit pc-relative reloc for the microblaze that
+stores the low 16 bits of a value. */
+  BFD_RELOC_MICROBLAZE_32_LO_PCREL,
+
+/* This is a 32 bit reloc for the microblaze that stores a
+value relative to the read-only small data area anchor. */
+  BFD_RELOC_MICROBLAZE_32_ROSDA,
+
+/* This is a 32 bit reloc for the microblaze that stores a
+value relative to the read-write small data area anchor. */
+  BFD_RELOC_MICROBLAZE_32_RWSDA,
+
+/* This is a 32 bit reloc for the microblaze to handle
+expressions of the form "Symbol Op Symbol". */
+  BFD_RELOC_MICROBLAZE_32_SYM_OP_SYM,
+
+/* This is a 64 bit reloc that stores the 32 bit pc-relative
+value in two words (with an imm instruction). No relocation is
+done here - only used for relaxing. */
+  BFD_RELOC_MICROBLAZE_64_NONE,
+
+/* This is a 64 bit reloc that stores the 32 bit pc-relative
+value in two words (with an imm instruction). The relocation is
+a PC-relative GOT offset. */
+  BFD_RELOC_MICROBLAZE_64_GOTPC,
+
+/* This is a 64 bit reloc that stores the 32 bit pc-relative
+value in two words (with an imm instruction). The relocation is
+a GOT offset. */
+  BFD_RELOC_MICROBLAZE_64_GOT,
+
+/* This is a 64 bit reloc that stores the 32 bit pc-relative
+value in two words (with an imm instruction). The relocation is
+a PC-relative offset into the PLT. */
+  BFD_RELOC_MICROBLAZE_64_PLT,
+
+/* This is a 64 bit reloc that stores the 32 bit GOT-relative
+value in two words (with an imm instruction). The relocation is
+a relative offset from _GLOBAL_OFFSET_TABLE_. */
+  BFD_RELOC_MICROBLAZE_64_GOTOFF,
+
+/* This is a 32 bit reloc that stores the 32 bit GOT-relative
+value in a word. The relocation is a relative offset from
+_GLOBAL_OFFSET_TABLE_. */
+  BFD_RELOC_MICROBLAZE_32_GOTOFF,
+
+/* This is used to tell the dynamic linker to copy the value out of
+the dynamic object into the runtime process image. */
+ BFD_RELOC_MICROBLAZE_COPY,
+
+/* Tilera TILEPro Relocations. */
+ BFD_RELOC_TILEPRO_COPY,
+ BFD_RELOC_TILEPRO_GLOB_DAT,
+ BFD_RELOC_TILEPRO_JMP_SLOT,
+ BFD_RELOC_TILEPRO_RELATIVE,
+ BFD_RELOC_TILEPRO_BROFF_X1,
+ BFD_RELOC_TILEPRO_JOFFLONG_X1,
+ BFD_RELOC_TILEPRO_JOFFLONG_X1_PLT,
+ BFD_RELOC_TILEPRO_IMM8_X0,
+ BFD_RELOC_TILEPRO_IMM8_Y0,
+ BFD_RELOC_TILEPRO_IMM8_X1,
+ BFD_RELOC_TILEPRO_IMM8_Y1,
+ BFD_RELOC_TILEPRO_DEST_IMM8_X1,
+ BFD_RELOC_TILEPRO_MT_IMM15_X1,
+ BFD_RELOC_TILEPRO_MF_IMM15_X1,
+ BFD_RELOC_TILEPRO_IMM16_X0,
+ BFD_RELOC_TILEPRO_IMM16_X1,
+ BFD_RELOC_TILEPRO_IMM16_X0_LO,
+ BFD_RELOC_TILEPRO_IMM16_X1_LO,
+ BFD_RELOC_TILEPRO_IMM16_X0_HI,
+ BFD_RELOC_TILEPRO_IMM16_X1_HI,
+ BFD_RELOC_TILEPRO_IMM16_X0_HA,
+ BFD_RELOC_TILEPRO_IMM16_X1_HA,
+ BFD_RELOC_TILEPRO_IMM16_X0_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X1_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X0_LO_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X1_LO_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X0_HI_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X1_HI_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X0_HA_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X1_HA_PCREL,
+ BFD_RELOC_TILEPRO_IMM16_X0_GOT,
+ BFD_RELOC_TILEPRO_IMM16_X1_GOT,
+ BFD_RELOC_TILEPRO_IMM16_X0_GOT_LO,
+ BFD_RELOC_TILEPRO_IMM16_X1_GOT_LO,
+ BFD_RELOC_TILEPRO_IMM16_X0_GOT_HI,
+ BFD_RELOC_TILEPRO_IMM16_X1_GOT_HI,
+ BFD_RELOC_TILEPRO_IMM16_X0_GOT_HA,
+ BFD_RELOC_TILEPRO_IMM16_X1_GOT_HA,
+ BFD_RELOC_TILEPRO_MMSTART_X0,
+ BFD_RELOC_TILEPRO_MMEND_X0,
+ BFD_RELOC_TILEPRO_MMSTART_X1,
+ BFD_RELOC_TILEPRO_MMEND_X1,
+ BFD_RELOC_TILEPRO_SHAMT_X0,
+ BFD_RELOC_TILEPRO_SHAMT_X1,
+ BFD_RELOC_TILEPRO_SHAMT_Y0,
+ BFD_RELOC_TILEPRO_SHAMT_Y1,
+ BFD_RELOC_TILEPRO_TLS_GD_CALL,
+ BFD_RELOC_TILEPRO_IMM8_X0_TLS_GD_ADD,
+ BFD_RELOC_TILEPRO_IMM8_X1_TLS_GD_ADD,
+ BFD_RELOC_TILEPRO_IMM8_Y0_TLS_GD_ADD,
+ BFD_RELOC_TILEPRO_IMM8_Y1_TLS_GD_ADD,
+ BFD_RELOC_TILEPRO_TLS_IE_LOAD,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_GD,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_GD,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_GD_LO,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_GD_LO,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_GD_HI,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_GD_HI,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_GD_HA,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_GD_HA,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_IE,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_IE,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_IE_LO,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_IE_LO,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_IE_HI,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_IE_HI,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_IE_HA,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_IE_HA,
+ BFD_RELOC_TILEPRO_TLS_DTPMOD32,
+ BFD_RELOC_TILEPRO_TLS_DTPOFF32,
+ BFD_RELOC_TILEPRO_TLS_TPOFF32,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_LE,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_LE,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_LE_LO,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_LE_LO,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_LE_HI,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_LE_HI,
+ BFD_RELOC_TILEPRO_IMM16_X0_TLS_LE_HA,
+ BFD_RELOC_TILEPRO_IMM16_X1_TLS_LE_HA,
+
+/* Tilera TILE-Gx Relocations. */
+ BFD_RELOC_TILEGX_HW0,
+ BFD_RELOC_TILEGX_HW1,
+ BFD_RELOC_TILEGX_HW2,
+ BFD_RELOC_TILEGX_HW3,
+ BFD_RELOC_TILEGX_HW0_LAST,
+ BFD_RELOC_TILEGX_HW1_LAST,
+ BFD_RELOC_TILEGX_HW2_LAST,
+ BFD_RELOC_TILEGX_COPY,
+ BFD_RELOC_TILEGX_GLOB_DAT,
+ BFD_RELOC_TILEGX_JMP_SLOT,
+ BFD_RELOC_TILEGX_RELATIVE,
+ BFD_RELOC_TILEGX_BROFF_X1,
+ BFD_RELOC_TILEGX_JUMPOFF_X1,
+ BFD_RELOC_TILEGX_JUMPOFF_X1_PLT,
+ BFD_RELOC_TILEGX_IMM8_X0,
+ BFD_RELOC_TILEGX_IMM8_Y0,
+ BFD_RELOC_TILEGX_IMM8_X1,
+ BFD_RELOC_TILEGX_IMM8_Y1,
+ BFD_RELOC_TILEGX_DEST_IMM8_X1,
+ BFD_RELOC_TILEGX_MT_IMM14_X1,
+ BFD_RELOC_TILEGX_MF_IMM14_X1,
+ BFD_RELOC_TILEGX_MMSTART_X0,
+ BFD_RELOC_TILEGX_MMEND_X0,
+ BFD_RELOC_TILEGX_SHAMT_X0,
+ BFD_RELOC_TILEGX_SHAMT_X1,
+ BFD_RELOC_TILEGX_SHAMT_Y0,
+ BFD_RELOC_TILEGX_SHAMT_Y1,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1,
+ BFD_RELOC_TILEGX_IMM16_X0_HW2,
+ BFD_RELOC_TILEGX_IMM16_X1_HW2,
+ BFD_RELOC_TILEGX_IMM16_X0_HW3,
+ BFD_RELOC_TILEGX_IMM16_X1_HW3,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_LAST,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_LAST,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1_LAST,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1_LAST,
+ BFD_RELOC_TILEGX_IMM16_X0_HW2_LAST,
+ BFD_RELOC_TILEGX_IMM16_X1_HW2_LAST,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X0_HW2_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X1_HW2_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X0_HW3_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X1_HW3_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_LAST_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_LAST_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1_LAST_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1_LAST_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X0_HW2_LAST_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X1_HW2_LAST_PCREL,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_GOT,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_GOT,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_LAST_GOT,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_LAST_GOT,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1_LAST_GOT,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1_LAST_GOT,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_TLS_GD,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_TLS_GD,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_TLS_LE,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_TLS_LE,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_LAST_TLS_LE,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_LAST_TLS_LE,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1_LAST_TLS_LE,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1_LAST_TLS_LE,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_LAST_TLS_GD,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_LAST_TLS_GD,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1_LAST_TLS_GD,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1_LAST_TLS_GD,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_TLS_IE,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_TLS_IE,
+ BFD_RELOC_TILEGX_IMM16_X0_HW0_LAST_TLS_IE,
+ BFD_RELOC_TILEGX_IMM16_X1_HW0_LAST_TLS_IE,
+ BFD_RELOC_TILEGX_IMM16_X0_HW1_LAST_TLS_IE,
+ BFD_RELOC_TILEGX_IMM16_X1_HW1_LAST_TLS_IE,
+ BFD_RELOC_TILEGX_TLS_DTPMOD64,
+ BFD_RELOC_TILEGX_TLS_DTPOFF64,
+ BFD_RELOC_TILEGX_TLS_TPOFF64,
+ BFD_RELOC_TILEGX_TLS_DTPMOD32,
+ BFD_RELOC_TILEGX_TLS_DTPOFF32,
+ BFD_RELOC_TILEGX_TLS_TPOFF32,
+ BFD_RELOC_TILEGX_TLS_GD_CALL,
+ BFD_RELOC_TILEGX_IMM8_X0_TLS_GD_ADD,
+ BFD_RELOC_TILEGX_IMM8_X1_TLS_GD_ADD,
+ BFD_RELOC_TILEGX_IMM8_Y0_TLS_GD_ADD,
+ BFD_RELOC_TILEGX_IMM8_Y1_TLS_GD_ADD,
+ BFD_RELOC_TILEGX_TLS_IE_LOAD,
+ BFD_RELOC_TILEGX_IMM8_X0_TLS_ADD,
+ BFD_RELOC_TILEGX_IMM8_X1_TLS_ADD,
+ BFD_RELOC_TILEGX_IMM8_Y0_TLS_ADD,
+ BFD_RELOC_TILEGX_IMM8_Y1_TLS_ADD,
+
+/* Adapteva EPIPHANY - 8 bit signed pc-relative displacement. */
+  BFD_RELOC_EPIPHANY_SIMM8,
+
+/* Adapteva EPIPHANY - 24 bit signed pc-relative displacement. */
+  BFD_RELOC_EPIPHANY_SIMM24,
+
+/* Adapteva EPIPHANY - 16 most-significant bits of absolute address. */
+  BFD_RELOC_EPIPHANY_HIGH,
+
+/* Adapteva EPIPHANY - 16 least-significant bits of absolute address. */
+  BFD_RELOC_EPIPHANY_LOW,
+
+/* Adapteva EPIPHANY - 11 bit signed number - add/sub immediate. */
+  BFD_RELOC_EPIPHANY_SIMM11,
+
+/* Adapteva EPIPHANY - 11 bit sign-magnitude number (ld/st displacement). */
+  BFD_RELOC_EPIPHANY_IMM11,
+
+/* Adapteva EPIPHANY - 8 bit immediate for 16 bit mov instruction. */
+ BFD_RELOC_EPIPHANY_IMM8,
+ BFD_RELOC_UNUSED };
+typedef enum bfd_reloc_code_real bfd_reloc_code_real_type;
+reloc_howto_type *bfd_reloc_type_lookup
+ (bfd *abfd, bfd_reloc_code_real_type code);
+reloc_howto_type *bfd_reloc_name_lookup
+ (bfd *abfd, const char *reloc_name);
+
+const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code);
+
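+/* Illustrative sketch (editorial, not part of the original header):
+   mapping a target-independent reloc code to the backend howto entry
+   and its name.  Assumes <stdio.h> and an already-opened `abfd';
+   the example_* name is ours.  */
+static void
+example_show_reloc (bfd *abfd, bfd_reloc_code_real_type code)
+{
+  reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, code);
+
+  if (howto != NULL)
+    printf ("%s -> howto \"%s\"\n",
+            bfd_get_reloc_code_name (code), howto->name);
+}
+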
+/* Extracted from syms.c. */
+
+typedef struct bfd_symbol
+{
+ /* A pointer to the BFD which owns the symbol. This information
+ is necessary so that a back end can work out what additional
+ information (invisible to the application writer) is carried
+ with the symbol.
+
+ This field is *almost* redundant, since you can use section->owner
+ instead, except that some symbols point to the global sections
+ bfd_{abs,com,und}_section. This could be fixed by making
+ these globals be per-bfd (or per-target-flavor). FIXME. */
+ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */
+
+ /* The text of the symbol. The name is left alone, and not copied; the
+ application may not alter it. */
+ const char *name;
+
+ /* The value of the symbol. This really should be a union of a
+ numeric value with a pointer, since some flags indicate that
+ a pointer to another symbol is stored here. */
+ symvalue value;
+
+ /* Attributes of a symbol. */
+#define BSF_NO_FLAGS 0x00
+
+ /* The symbol has local scope; <<static>> in <<C>>. The value
+ is the offset into the section of the data. */
+#define BSF_LOCAL (1 << 0)
+
+ /* The symbol has global scope; initialized data in <<C>>. The
+ value is the offset into the section of the data. */
+#define BSF_GLOBAL (1 << 1)
+
+ /* The symbol has global scope and is exported. The value is
+ the offset into the section of the data. */
+#define BSF_EXPORT BSF_GLOBAL /* No real difference. */
+
+ /* A normal C symbol would be one of:
+ <<BSF_LOCAL>>, <<BSF_COMMON>>, <<BSF_UNDEFINED>> or
+ <<BSF_GLOBAL>>. */
+
+ /* The symbol is a debugging record. The value has an arbitrary
+ meaning, unless BSF_DEBUGGING_RELOC is also set. */
+#define BSF_DEBUGGING (1 << 2)
+
+ /* The symbol denotes a function entry point. Used in ELF,
+ perhaps others someday. */
+#define BSF_FUNCTION (1 << 3)
+
+ /* Used by the linker. */
+#define BSF_KEEP (1 << 5)
+#define BSF_KEEP_G (1 << 6)
+
+ /* A weak global symbol, overridable without warnings by
+ a regular global symbol of the same name. */
+#define BSF_WEAK (1 << 7)
+
+ /* This symbol was created to point to a section, e.g. ELF's
+ STT_SECTION symbols. */
+#define BSF_SECTION_SYM (1 << 8)
+
+ /* The symbol used to be a common symbol, but now it is
+ allocated. */
+#define BSF_OLD_COMMON (1 << 9)
+
+  /* In some files the type of a symbol sometimes alters its
+     location in an output file - i.e., in COFF an <<ISFCN>> symbol
+     which is also a <<C_EXT>> symbol appears where it was
+     declared and not at the end of a section.  This bit is set
+     by the target BFD part to convey this information.  */
+#define BSF_NOT_AT_END (1 << 10)
+
+  /* Signal that the symbol is the label of a constructor section. */
+#define BSF_CONSTRUCTOR (1 << 11)
+
+ /* Signal that the symbol is a warning symbol. The name is a
+ warning. The name of the next symbol is the one to warn about;
+ if a reference is made to a symbol with the same name as the next
+ symbol, a warning is issued by the linker. */
+#define BSF_WARNING (1 << 12)
+
+ /* Signal that the symbol is indirect. This symbol is an indirect
+ pointer to the symbol with the same name as the next symbol. */
+#define BSF_INDIRECT (1 << 13)
+
+ /* BSF_FILE marks symbols that contain a file name. This is used
+ for ELF STT_FILE symbols. */
+#define BSF_FILE (1 << 14)
+
+ /* Symbol is from dynamic linking information. */
+#define BSF_DYNAMIC (1 << 15)
+
+ /* The symbol denotes a data object. Used in ELF, and perhaps
+ others someday. */
+#define BSF_OBJECT (1 << 16)
+
+ /* This symbol is a debugging symbol. The value is the offset
+ into the section of the data. BSF_DEBUGGING should be set
+ as well. */
+#define BSF_DEBUGGING_RELOC (1 << 17)
+
+ /* This symbol is thread local. Used in ELF. */
+#define BSF_THREAD_LOCAL (1 << 18)
+
+ /* This symbol represents a complex relocation expression,
+ with the expression tree serialized in the symbol name. */
+#define BSF_RELC (1 << 19)
+
+ /* This symbol represents a signed complex relocation expression,
+ with the expression tree serialized in the symbol name. */
+#define BSF_SRELC (1 << 20)
+
+ /* This symbol was created by bfd_get_synthetic_symtab. */
+#define BSF_SYNTHETIC (1 << 21)
+
+ /* This symbol is an indirect code object. Unrelated to BSF_INDIRECT.
+ The dynamic linker will compute the value of this symbol by
+     calling the function that it points to. BSF_FUNCTION must
+     also be set. */
+#define BSF_GNU_INDIRECT_FUNCTION (1 << 22)
+ /* This symbol is a globally unique data object. The dynamic linker
+ will make sure that in the entire process there is just one symbol
+ with this name and type in use. BSF_OBJECT must also be set. */
+#define BSF_GNU_UNIQUE (1 << 23)
+
+ flagword flags;
+
+  /* A pointer to the section to which this symbol is
+     relative.  This will always be non-NULL; there are special
+     sections for undefined and absolute symbols.  */
+ struct bfd_section *section;
+
+ /* Back end special data. */
+ union
+ {
+ void *p;
+ bfd_vma i;
+ }
+ udata;
+}
+asymbol;
+
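+/* Illustrative sketch (editorial, not part of the original header):
+   classifying a canonicalized symbol by its flag bits.  The
+   example_* name is ours.  */
+static const char *
+example_symbol_scope (const asymbol *sym)
+{
+  if (sym->flags & BSF_LOCAL)
+    return "local";
+  if (sym->flags & BSF_WEAK)
+    return "weak";
+  if (sym->flags & BSF_GLOBAL)
+    return "global";
+  return "other";
+}
+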
+#define bfd_get_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd))
+
+bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym);
+
+bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name);
+
+#define bfd_is_local_label_name(abfd, name) \
+ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name))
+
+bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym);
+
+#define bfd_is_target_special_symbol(abfd, sym) \
+ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym))
+
+#define bfd_canonicalize_symtab(abfd, location) \
+ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location))
+
+bfd_boolean bfd_set_symtab
+ (bfd *abfd, asymbol **location, unsigned int count);
+
+void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol);
+
+#define bfd_make_empty_symbol(abfd) \
+ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd))
+
+asymbol *_bfd_generic_make_empty_symbol (bfd *);
+
+#define bfd_make_debug_symbol(abfd,ptr,size) \
+ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size))
+
+int bfd_decode_symclass (asymbol *symbol);
+
+bfd_boolean bfd_is_undefined_symclass (int symclass);
+
+void bfd_symbol_info (asymbol *symbol, symbol_info *ret);
+
+bfd_boolean bfd_copy_private_symbol_data
+ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym);
+
+#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \
+ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \
+ (ibfd, isymbol, obfd, osymbol))
+
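+/* Illustrative sketch (editorial, not part of the original header):
+   the usual two-step symbol-table read.  The upper bound is a byte
+   count for the pointer array; the canonicalize call fills it and
+   returns the number of symbols.  Assumes <stdio.h>/<stdlib.h>;
+   error handling is elided and the example_* name is ours.  */
+static void
+example_list_symbols (bfd *abfd)
+{
+  long storage = bfd_get_symtab_upper_bound (abfd);
+
+  if (storage > 0)
+    {
+      asymbol **syms = (asymbol **) malloc (storage);
+      long count = bfd_canonicalize_symtab (abfd, syms);
+      long i;
+
+      for (i = 0; i < count; i++)
+        printf ("%s\n", syms[i]->name);
+      free (syms);
+    }
+}
+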
+/* Extracted from bfd.c. */
+enum bfd_direction
+ {
+ no_direction = 0,
+ read_direction = 1,
+ write_direction = 2,
+ both_direction = 3
+ };
+
+struct bfd
+{
+ /* A unique identifier of the BFD */
+ unsigned int id;
+
+ /* The filename the application opened the BFD with. */
+ const char *filename;
+
+ /* A pointer to the target jump table. */
+ const struct bfd_target *xvec;
+
+ /* The IOSTREAM, and corresponding IO vector that provide access
+ to the file backing the BFD. */
+ void *iostream;
+ const struct bfd_iovec *iovec;
+
+ /* The caching routines use these to maintain a
+ least-recently-used list of BFDs. */
+ struct bfd *lru_prev, *lru_next;
+
+ /* When a file is closed by the caching routines, BFD retains
+ state information on the file here... */
+ ufile_ptr where;
+
+ /* File modified time, if mtime_set is TRUE. */
+ long mtime;
+
+ /* Reserved for an unimplemented file locking extension. */
+ int ifd;
+
+ /* The format which belongs to the BFD. (object, core, etc.) */
+ bfd_format format;
+
+ /* The direction with which the BFD was opened. */
+ enum bfd_direction direction;
+
+ /* Format_specific flags. */
+ flagword flags;
+
+ /* Values that may appear in the flags field of a BFD. These also
+ appear in the object_flags field of the bfd_target structure, where
+ they indicate the set of flags used by that backend (not all flags
+     are meaningful for all object file formats) (FIXME: at the moment,
+     the object_flags values have mostly just been copied from one
+     backend to another, and are not necessarily correct).  */
+
+#define BFD_NO_FLAGS 0x00
+
+ /* BFD contains relocation entries. */
+#define HAS_RELOC 0x01
+
+ /* BFD is directly executable. */
+#define EXEC_P 0x02
+
+ /* BFD has line number information (basically used for F_LNNO in a
+ COFF header). */
+#define HAS_LINENO 0x04
+
+ /* BFD has debugging information. */
+#define HAS_DEBUG 0x08
+
+ /* BFD has symbols. */
+#define HAS_SYMS 0x10
+
+ /* BFD has local symbols (basically used for F_LSYMS in a COFF
+ header). */
+#define HAS_LOCALS 0x20
+
+ /* BFD is a dynamic object. */
+#define DYNAMIC 0x40
+
+ /* Text section is write protected (if D_PAGED is not set, this is
+ like an a.out NMAGIC file) (the linker sets this by default, but
+ clears it for -r or -N). */
+#define WP_TEXT 0x80
+
+ /* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the
+ linker sets this by default, but clears it for -r or -n or -N). */
+#define D_PAGED 0x100
+
+ /* BFD is relaxable (this means that bfd_relax_section may be able to
+ do something) (sometimes bfd_relax_section can do something even if
+ this is not set). */
+#define BFD_IS_RELAXABLE 0x200
+
+ /* This may be set before writing out a BFD to request using a
+ traditional format. For example, this is used to request that when
+ writing out an a.out object the symbols not be hashed to eliminate
+ duplicates. */
+#define BFD_TRADITIONAL_FORMAT 0x400
+
+ /* This flag indicates that the BFD contents are actually cached
+ in memory. If this is set, iostream points to a bfd_in_memory
+ struct. */
+#define BFD_IN_MEMORY 0x800
+
+ /* The sections in this BFD specify a memory page. */
+#define HAS_LOAD_PAGE 0x1000
+
+ /* This BFD has been created by the linker and doesn't correspond
+ to any input file. */
+#define BFD_LINKER_CREATED 0x2000
+
+ /* This may be set before writing out a BFD to request that it
+ be written using values for UIDs, GIDs, timestamps, etc. that
+ will be consistent from run to run. */
+#define BFD_DETERMINISTIC_OUTPUT 0x4000
+
+ /* Compress sections in this BFD. */
+#define BFD_COMPRESS 0x8000
+
+ /* Decompress sections in this BFD. */
+#define BFD_DECOMPRESS 0x10000
+
+ /* BFD is a dummy, for plugins. */
+#define BFD_PLUGIN 0x20000
+
+ /* Flags bits to be saved in bfd_preserve_save. */
+#define BFD_FLAGS_SAVED \
+ (BFD_IN_MEMORY | BFD_COMPRESS | BFD_DECOMPRESS | BFD_PLUGIN)
+
+ /* Flags bits which are for BFD use only. */
+#define BFD_FLAGS_FOR_BFD_USE_MASK \
+ (BFD_IN_MEMORY | BFD_COMPRESS | BFD_DECOMPRESS | BFD_LINKER_CREATED \
+ | BFD_PLUGIN | BFD_TRADITIONAL_FORMAT | BFD_DETERMINISTIC_OUTPUT)
+
+  /* Currently my_archive is tested before adding origin to
+     anything.  I believe that this could always become an add of
+     origin, with origin set to 0 for non-archive files.  */
+ ufile_ptr origin;
+
+ /* The origin in the archive of the proxy entry. This will
+ normally be the same as origin, except for thin archives,
+ when it will contain the current offset of the proxy in the
+ thin archive rather than the offset of the bfd in its actual
+ container. */
+ ufile_ptr proxy_origin;
+
+ /* A hash table for section names. */
+ struct bfd_hash_table section_htab;
+
+ /* Pointer to linked list of sections. */
+ struct bfd_section *sections;
+
+ /* The last section on the section list. */
+ struct bfd_section *section_last;
+
+ /* The number of sections. */
+ unsigned int section_count;
+
+ /* Stuff only useful for object files:
+ The start address. */
+ bfd_vma start_address;
+
+ /* Used for input and output. */
+ unsigned int symcount;
+
+ /* Symbol table for output BFD (with symcount entries).
+ Also used by the linker to cache input BFD symbols. */
+ struct bfd_symbol **outsymbols;
+
+ /* Used for slurped dynamic symbol tables. */
+ unsigned int dynsymcount;
+
+ /* Pointer to structure which contains architecture information. */
+ const struct bfd_arch_info *arch_info;
+
+ /* Stuff only useful for archives. */
+ void *arelt_data;
+ struct bfd *my_archive; /* The containing archive BFD. */
+ struct bfd *archive_next; /* The next BFD in the archive. */
+ struct bfd *archive_head; /* The first BFD in the archive. */
+  struct bfd *nested_archives; /* List of nested archives in a flattened
+                                  thin archive. */
+
+ /* A chain of BFD structures involved in a link. */
+ struct bfd *link_next;
+
+ /* A field used by _bfd_generic_link_add_archive_symbols. This will
+ be used only for archive elements. */
+ int archive_pass;
+
+ /* Used by the back end to hold private data. */
+ union
+ {
+ struct aout_data_struct *aout_data;
+ struct artdata *aout_ar_data;
+ struct _oasys_data *oasys_obj_data;
+ struct _oasys_ar_data *oasys_ar_data;
+ struct coff_tdata *coff_obj_data;
+ struct pe_tdata *pe_obj_data;
+ struct xcoff_tdata *xcoff_obj_data;
+ struct ecoff_tdata *ecoff_obj_data;
+ struct ieee_data_struct *ieee_data;
+ struct ieee_ar_data_struct *ieee_ar_data;
+ struct srec_data_struct *srec_data;
+ struct verilog_data_struct *verilog_data;
+ struct ihex_data_struct *ihex_data;
+ struct tekhex_data_struct *tekhex_data;
+ struct elf_obj_tdata *elf_obj_data;
+ struct nlm_obj_tdata *nlm_obj_data;
+ struct bout_data_struct *bout_data;
+ struct mmo_data_struct *mmo_data;
+ struct sun_core_struct *sun_core_data;
+ struct sco5_core_struct *sco5_core_data;
+ struct trad_core_struct *trad_core_data;
+ struct som_data_struct *som_data;
+ struct hpux_core_struct *hpux_core_data;
+ struct hppabsd_core_struct *hppabsd_core_data;
+ struct sgi_core_struct *sgi_core_data;
+ struct lynx_core_struct *lynx_core_data;
+ struct osf_core_struct *osf_core_data;
+ struct cisco_core_struct *cisco_core_data;
+ struct versados_data_struct *versados_data;
+ struct netbsd_core_struct *netbsd_core_data;
+ struct mach_o_data_struct *mach_o_data;
+ struct mach_o_fat_data_struct *mach_o_fat_data;
+ struct plugin_data_struct *plugin_data;
+ struct bfd_pef_data_struct *pef_data;
+ struct bfd_pef_xlib_data_struct *pef_xlib_data;
+ struct bfd_sym_data_struct *sym_data;
+ void *any;
+ }
+ tdata;
+
+ /* Used by the application to hold private data. */
+ void *usrdata;
+
+ /* Where all the allocated stuff under this BFD goes. This is a
+ struct objalloc *, but we use void * to avoid requiring the inclusion
+ of objalloc.h. */
+ void *memory;
+
+ /* Is the file descriptor being cached? That is, can it be closed as
+ needed, and re-opened when accessed later? */
+ unsigned int cacheable : 1;
+
+ /* Marks whether there was a default target specified when the
+ BFD was opened. This is used to select which matching algorithm
+ to use to choose the back end. */
+ unsigned int target_defaulted : 1;
+
+  /* Set if this BFD has been opened at least once. */
+ unsigned int opened_once : 1;
+
+ /* Set if we have a locally maintained mtime value, rather than
+ getting it from the file each time. */
+ unsigned int mtime_set : 1;
+
+ /* Flag set if symbols from this BFD should not be exported. */
+ unsigned int no_export : 1;
+
+ /* Remember when output has begun, to stop strange things
+ from happening. */
+ unsigned int output_has_begun : 1;
+
+ /* Have archive map. */
+ unsigned int has_armap : 1;
+
+ /* Set if this is a thin archive. */
+ unsigned int is_thin_archive : 1;
+
+ /* Set if only required symbols should be added in the link hash table for
+ this object. Used by VMS linkers. */
+ unsigned int selective_search : 1;
+};
+
+typedef enum bfd_error
+{
+ bfd_error_no_error = 0,
+ bfd_error_system_call,
+ bfd_error_invalid_target,
+ bfd_error_wrong_format,
+ bfd_error_wrong_object_format,
+ bfd_error_invalid_operation,
+ bfd_error_no_memory,
+ bfd_error_no_symbols,
+ bfd_error_no_armap,
+ bfd_error_no_more_archived_files,
+ bfd_error_malformed_archive,
+ bfd_error_file_not_recognized,
+ bfd_error_file_ambiguously_recognized,
+ bfd_error_no_contents,
+ bfd_error_nonrepresentable_section,
+ bfd_error_no_debug_section,
+ bfd_error_bad_value,
+ bfd_error_file_truncated,
+ bfd_error_file_too_big,
+ bfd_error_on_input,
+ bfd_error_invalid_error_code
+}
+bfd_error_type;
+
+bfd_error_type bfd_get_error (void);
+
+void bfd_set_error (bfd_error_type error_tag, ...);
+
+const char *bfd_errmsg (bfd_error_type error_tag);
+
+void bfd_perror (const char *message);
+
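+/* Illustrative sketch (editorial, not part of the original header):
+   the usual error-reporting pattern after any failing BFD call.
+   Assumes <stdio.h>; `prog' and the example_* name are ours.  */
+static void
+example_report_bfd_error (const char *prog, const char *filename)
+{
+  fprintf (stderr, "%s: %s: %s\n", prog, filename,
+           bfd_errmsg (bfd_get_error ()));
+}
+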
+typedef void (*bfd_error_handler_type) (const char *, ...);
+
+bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type);
+
+void bfd_set_error_program_name (const char *);
+
+bfd_error_handler_type bfd_get_error_handler (void);
+
+typedef void (*bfd_assert_handler_type) (const char *bfd_formatmsg,
+ const char *bfd_version,
+ const char *bfd_file,
+ int bfd_line);
+
+bfd_assert_handler_type bfd_set_assert_handler (bfd_assert_handler_type);
+
+bfd_assert_handler_type bfd_get_assert_handler (void);
+
+long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect);
+
+long bfd_canonicalize_reloc
+ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms);
+
+void bfd_set_reloc
+ (bfd *abfd, asection *sec, arelent **rel, unsigned int count);
+
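+/* Illustrative sketch (editorial, not part of the original header):
+   reading the relocations of one section, mirroring the symbol-table
+   pattern above.  `syms' is the canonicalized symbol table of `abfd'.
+   Assumes <stdio.h>/<stdlib.h>; error handling is elided and the
+   example_* name is ours.  */
+static void
+example_list_relocs (bfd *abfd, asection *sec, asymbol **syms)
+{
+  long size = bfd_get_reloc_upper_bound (abfd, sec);
+
+  if (size > 0)
+    {
+      arelent **relocs = (arelent **) malloc (size);
+      long count = bfd_canonicalize_reloc (abfd, sec, relocs, syms);
+      long i;
+
+      for (i = 0; i < count; i++)
+        printf ("%#lx %s\n",
+                (unsigned long) relocs[i]->address,
+                relocs[i]->howto->name);
+      free (relocs);
+    }
+}
+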
+bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags);
+
+int bfd_get_arch_size (bfd *abfd);
+
+int bfd_get_sign_extend_vma (bfd *abfd);
+
+bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma);
+
+unsigned int bfd_get_gp_size (bfd *abfd);
+
+void bfd_set_gp_size (bfd *abfd, unsigned int i);
+
+bfd_vma bfd_scan_vma (const char *string, const char **end, int base);
+
+bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_header_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_header_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_merge_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags);
+
+#define bfd_set_private_flags(abfd, flags) \
+ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags))
+#define bfd_sizeof_headers(abfd, info) \
+ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, info))
+
+#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_nearest_line, \
+ (abfd, sec, syms, off, file, func, line))
+
+#define bfd_find_nearest_line_discriminator(abfd, sec, syms, off, file, func, \
+ line, disc) \
+ BFD_SEND (abfd, _bfd_find_nearest_line_discriminator, \
+ (abfd, sec, syms, off, file, func, line, disc))
+
+#define bfd_find_line(abfd, syms, sym, file, line) \
+ BFD_SEND (abfd, _bfd_find_line, \
+ (abfd, syms, sym, file, line))
+
+#define bfd_find_inliner_info(abfd, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_inliner_info, \
+ (abfd, file, func, line))
+
+#define bfd_debug_info_start(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_start, (abfd))
+
+#define bfd_debug_info_end(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_end, (abfd))
+
+#define bfd_debug_info_accumulate(abfd, section) \
+ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section))
+
+#define bfd_stat_arch_elt(abfd, stat) \
+ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat))
+
+#define bfd_update_armap_timestamp(abfd) \
+ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd))
+
+#define bfd_set_arch_mach(abfd, arch, mach)\
+ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach))
+
+#define bfd_relax_section(abfd, section, link_info, again) \
+ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again))
+
+#define bfd_gc_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info))
+
+#define bfd_lookup_section_flags(link_info, flag_info, section) \
+ BFD_SEND (abfd, _bfd_lookup_section_flags, (link_info, flag_info, section))
+
+#define bfd_merge_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info))
+
+#define bfd_is_group_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec))
+
+#define bfd_discard_group(abfd, sec) \
+ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec))
+
+#define bfd_link_hash_table_create(abfd) \
+ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd))
+
+#define bfd_link_hash_table_free(abfd, hash) \
+ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash))
+
+#define bfd_link_add_symbols(abfd, info) \
+ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info))
+
+#define bfd_link_just_syms(abfd, sec, info) \
+ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info))
+
+#define bfd_final_link(abfd, info) \
+ BFD_SEND (abfd, _bfd_final_link, (abfd, info))
+
+#define bfd_free_cached_info(abfd) \
+ BFD_SEND (abfd, _bfd_free_cached_info, (abfd))
+
+#define bfd_get_dynamic_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd))
+
+#define bfd_print_private_bfd_data(abfd, file)\
+ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file))
+
+#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols))
+
+#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \
+ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \
+ dyncount, dynsyms, ret))
+
+#define bfd_get_dynamic_reloc_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd))
+
+#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms))
+
+extern bfd_byte *bfd_get_relocated_section_contents
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *,
+ bfd_boolean, asymbol **);
+
+bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative);
+
+struct bfd_preserve
+{
+ void *marker;
+ void *tdata;
+ flagword flags;
+ const struct bfd_arch_info *arch_info;
+ struct bfd_section *sections;
+ struct bfd_section *section_last;
+ unsigned int section_count;
+ struct bfd_hash_table section_htab;
+};
+
+bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_restore (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_finish (bfd *, struct bfd_preserve *);
+
+bfd_vma bfd_emul_get_maxpagesize (const char *);
+
+void bfd_emul_set_maxpagesize (const char *, bfd_vma);
+
+bfd_vma bfd_emul_get_commonpagesize (const char *);
+
+void bfd_emul_set_commonpagesize (const char *, bfd_vma);
+
+char *bfd_demangle (bfd *, const char *, int);
+
+/* Extracted from archive.c. */
+symindex bfd_get_next_mapent
+ (bfd *abfd, symindex previous, carsym **sym);
+
+bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head);
+
+bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous);
+
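+/* Illustrative sketch (editorial, not part of the original header):
+   the standard idiom for walking the members of an archive BFD.
+   Assumes <stdio.h>; the example_* name is ours.  */
+static void
+example_walk_archive (bfd *archive)
+{
+  bfd *element = bfd_openr_next_archived_file (archive, NULL);
+
+  while (element != NULL)
+    {
+      printf ("member: %s\n", element->filename);
+      element = bfd_openr_next_archived_file (archive, element);
+    }
+}
+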
+/* Extracted from corefile.c. */
+const char *bfd_core_file_failing_command (bfd *abfd);
+
+int bfd_core_file_failing_signal (bfd *abfd);
+
+int bfd_core_file_pid (bfd *abfd);
+
+bfd_boolean core_file_matches_executable_p
+ (bfd *core_bfd, bfd *exec_bfd);
+
+bfd_boolean generic_core_file_matches_executable_p
+ (bfd *core_bfd, bfd *exec_bfd);
+
+/* Extracted from targets.c. */
+#define BFD_SEND(bfd, message, arglist) \
+ ((*((bfd)->xvec->message)) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND
+#define BFD_SEND(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ ((*((bfd)->xvec->message)) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND_FMT
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
+
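+/* Editorial note (not part of the original header): BFD_SEND is the
+   dispatch mechanism behind most of the bfd_* macros above.  For
+   example, the bfd_canonicalize_symtab macro expands, roughly, to
+
+     (*abfd->xvec->_bfd_canonicalize_symtab) (abfd, location)
+
+   so every generic call is routed through the per-target jump table
+   (the bfd_target structure defined below).  */
+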
+enum bfd_flavour
+{
+ bfd_target_unknown_flavour,
+ bfd_target_aout_flavour,
+ bfd_target_coff_flavour,
+ bfd_target_ecoff_flavour,
+ bfd_target_xcoff_flavour,
+ bfd_target_elf_flavour,
+ bfd_target_ieee_flavour,
+ bfd_target_nlm_flavour,
+ bfd_target_oasys_flavour,
+ bfd_target_tekhex_flavour,
+ bfd_target_srec_flavour,
+ bfd_target_verilog_flavour,
+ bfd_target_ihex_flavour,
+ bfd_target_som_flavour,
+ bfd_target_os9k_flavour,
+ bfd_target_versados_flavour,
+ bfd_target_msdos_flavour,
+ bfd_target_ovax_flavour,
+ bfd_target_evax_flavour,
+ bfd_target_mmo_flavour,
+ bfd_target_mach_o_flavour,
+ bfd_target_pef_flavour,
+ bfd_target_pef_xlib_flavour,
+ bfd_target_sym_flavour
+};
+
+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN };
+
+/* Forward declaration. */
+typedef struct bfd_link_info _bfd_link_info;
+
+/* Forward declaration. */
+typedef struct flag_info flag_info;
+
+typedef struct bfd_target
+{
+ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */
+ char *name;
+
+ /* The "flavour" of a back end is a general indication about
+ the contents of a file. */
+ enum bfd_flavour flavour;
+
+ /* The order of bytes within the data area of a file. */
+ enum bfd_endian byteorder;
+
+ /* The order of bytes within the header parts of a file. */
+ enum bfd_endian header_byteorder;
+
+ /* A mask of all the flags which an executable may have set -
+ from the set <<BFD_NO_FLAGS>>, <<HAS_RELOC>>, ...<<D_PAGED>>. */
+ flagword object_flags;
+
+ /* A mask of all the flags which a section may have set - from
+ the set <<SEC_NO_FLAGS>>, <<SEC_ALLOC>>, ...<<SET_NEVER_LOAD>>. */
+ flagword section_flags;
+
+  /* The character normally found at the front of a symbol
+     (if any), perhaps `_'.  */
+ char symbol_leading_char;
+
+ /* The pad character for file names within an archive header. */
+ char ar_pad_char;
+
+ /* The maximum number of characters in an archive header. */
+ unsigned char ar_max_namelen;
+
+ /* How well this target matches, used to select between various
+ possible targets when more than one target matches. */
+ unsigned char match_priority;
+
+ /* Entries for byte swapping for data. These are different from the
+ other entry points, since they don't take a BFD as the first argument.
+ Certain other handlers could do the same. */
+ bfd_uint64_t (*bfd_getx64) (const void *);
+ bfd_int64_t (*bfd_getx_signed_64) (const void *);
+ void (*bfd_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_getx32) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_32) (const void *);
+ void (*bfd_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_getx16) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_16) (const void *);
+ void (*bfd_putx16) (bfd_vma, void *);
+
+ /* Byte swapping for the headers. */
+ bfd_uint64_t (*bfd_h_getx64) (const void *);
+ bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
+ void (*bfd_h_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_h_getx32) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
+ void (*bfd_h_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_h_getx16) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *);
+ void (*bfd_h_putx16) (bfd_vma, void *);
+
+ /* Format dependent routines: these are vectors of entry points
+ within the target vector structure, one for each format to check. */
+
+ /* Check the format of a file being read. Return a <<bfd_target *>> or zero. */
+ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *);
+
+ /* Set the format of a file being written. */
+ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *);
+
+ /* Write cached information into a file being written, at <<bfd_close>>. */
+ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *);
+
+
+ /* Generic entry points. */
+#define BFD_JUMP_TABLE_GENERIC(NAME) \
+ NAME##_close_and_cleanup, \
+ NAME##_bfd_free_cached_info, \
+ NAME##_new_section_hook, \
+ NAME##_get_section_contents, \
+ NAME##_get_section_contents_in_window
+
+ /* Called when the BFD is being closed to do any necessary cleanup. */
+ bfd_boolean (*_close_and_cleanup) (bfd *);
+ /* Ask the BFD to free all cached information. */
+ bfd_boolean (*_bfd_free_cached_info) (bfd *);
+ /* Called when a new section is created. */
+ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr);
+ /* Read the contents of a section. */
+ bfd_boolean (*_bfd_get_section_contents)
+ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type);
+ bfd_boolean (*_bfd_get_section_contents_in_window)
+ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type);
+
+ /* Entry points to copy private data. */
+#define BFD_JUMP_TABLE_COPY(NAME) \
+ NAME##_bfd_copy_private_bfd_data, \
+ NAME##_bfd_merge_private_bfd_data, \
+ _bfd_generic_init_private_section_data, \
+ NAME##_bfd_copy_private_section_data, \
+ NAME##_bfd_copy_private_symbol_data, \
+ NAME##_bfd_copy_private_header_data, \
+ NAME##_bfd_set_private_flags, \
+ NAME##_bfd_print_private_bfd_data
+
+ /* Called to copy BFD general private data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *);
+ /* Called to merge BFD general private data from one object file
+ to a common output file when linking. */
+ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *);
+ /* Called to initialize BFD private section data from one object file
+ to another. */
+#define bfd_init_private_section_data(ibfd, isec, obfd, osec, link_info) \
+ BFD_SEND (obfd, _bfd_init_private_section_data, (ibfd, isec, obfd, osec, link_info))
+ bfd_boolean (*_bfd_init_private_section_data)
+ (bfd *, sec_ptr, bfd *, sec_ptr, struct bfd_link_info *);
+ /* Called to copy BFD private section data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_section_data)
+ (bfd *, sec_ptr, bfd *, sec_ptr);
+ /* Called to copy BFD private symbol data from one symbol
+ to another. */
+ bfd_boolean (*_bfd_copy_private_symbol_data)
+ (bfd *, asymbol *, bfd *, asymbol *);
+ /* Called to copy BFD private header data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_header_data)
+ (bfd *, bfd *);
+ /* Called to set private backend flags. */
+ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword);
+
+ /* Called to print private BFD data. */
+ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *);
+
+ /* Core file entry points. */
+#define BFD_JUMP_TABLE_CORE(NAME) \
+ NAME##_core_file_failing_command, \
+ NAME##_core_file_failing_signal, \
+ NAME##_core_file_matches_executable_p, \
+ NAME##_core_file_pid
+
+ char * (*_core_file_failing_command) (bfd *);
+ int (*_core_file_failing_signal) (bfd *);
+ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *);
+ int (*_core_file_pid) (bfd *);
+
+ /* Archive entry points. */
+#define BFD_JUMP_TABLE_ARCHIVE(NAME) \
+ NAME##_slurp_armap, \
+ NAME##_slurp_extended_name_table, \
+ NAME##_construct_extended_name_table, \
+ NAME##_truncate_arname, \
+ NAME##_write_armap, \
+ NAME##_read_ar_hdr, \
+ NAME##_write_ar_hdr, \
+ NAME##_openr_next_archived_file, \
+ NAME##_get_elt_at_index, \
+ NAME##_generic_stat_arch_elt, \
+ NAME##_update_armap_timestamp
+
+ bfd_boolean (*_bfd_slurp_armap) (bfd *);
+ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *);
+ bfd_boolean (*_bfd_construct_extended_name_table)
+ (bfd *, char **, bfd_size_type *, const char **);
+ void (*_bfd_truncate_arname) (bfd *, const char *, char *);
+ bfd_boolean (*write_armap)
+ (bfd *, unsigned int, struct orl *, unsigned int, int);
+ void * (*_bfd_read_ar_hdr_fn) (bfd *);
+ bfd_boolean (*_bfd_write_ar_hdr_fn) (bfd *, bfd *);
+ bfd * (*openr_next_archived_file) (bfd *, bfd *);
+#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i))
+ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex);
+ int (*_bfd_stat_arch_elt) (bfd *, struct stat *);
+ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *);
+
+ /* Entry points used for symbols. */
+#define BFD_JUMP_TABLE_SYMBOLS(NAME) \
+ NAME##_get_symtab_upper_bound, \
+ NAME##_canonicalize_symtab, \
+ NAME##_make_empty_symbol, \
+ NAME##_print_symbol, \
+ NAME##_get_symbol_info, \
+ NAME##_bfd_is_local_label_name, \
+ NAME##_bfd_is_target_special_symbol, \
+ NAME##_get_lineno, \
+ NAME##_find_nearest_line, \
+ _bfd_generic_find_nearest_line_discriminator, \
+ _bfd_generic_find_line, \
+ NAME##_find_inliner_info, \
+ NAME##_bfd_make_debug_symbol, \
+ NAME##_read_minisymbols, \
+ NAME##_minisymbol_to_symbol
+
+ long (*_bfd_get_symtab_upper_bound) (bfd *);
+ long (*_bfd_canonicalize_symtab)
+ (bfd *, struct bfd_symbol **);
+ struct bfd_symbol *
+ (*_bfd_make_empty_symbol) (bfd *);
+ void (*_bfd_print_symbol)
+ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type);
+#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e))
+ void (*_bfd_get_symbol_info)
+ (bfd *, struct bfd_symbol *, symbol_info *);
+#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e))
+ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *);
+ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *);
+ alent * (*_get_lineno) (bfd *, struct bfd_symbol *);
+ bfd_boolean (*_bfd_find_nearest_line)
+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma,
+ const char **, const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_nearest_line_discriminator)
+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma,
+ const char **, const char **, unsigned int *, unsigned int *);
+ bfd_boolean (*_bfd_find_line)
+ (bfd *, struct bfd_symbol **, struct bfd_symbol *,
+ const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_inliner_info)
+ (bfd *, const char **, const char **, unsigned int *);
+ /* Back-door to allow format-aware applications to create debug symbols
+ while using BFD for everything else. Currently used by the assembler
+ when creating COFF files. */
+ asymbol * (*_bfd_make_debug_symbol)
+ (bfd *, void *, unsigned long size);
+#define bfd_read_minisymbols(b, d, m, s) \
+ BFD_SEND (b, _read_minisymbols, (b, d, m, s))
+ long (*_read_minisymbols)
+ (bfd *, bfd_boolean, void **, unsigned int *);
+#define bfd_minisymbol_to_symbol(b, d, m, f) \
+ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f))
+ asymbol * (*_minisymbol_to_symbol)
+ (bfd *, bfd_boolean, const void *, asymbol *);
+
+ /* Routines for relocs. */
+#define BFD_JUMP_TABLE_RELOCS(NAME) \
+ NAME##_get_reloc_upper_bound, \
+ NAME##_canonicalize_reloc, \
+ NAME##_bfd_reloc_type_lookup, \
+ NAME##_bfd_reloc_name_lookup
+
+ long (*_get_reloc_upper_bound) (bfd *, sec_ptr);
+ long (*_bfd_canonicalize_reloc)
+ (bfd *, sec_ptr, arelent **, struct bfd_symbol **);
+ /* See documentation on reloc types. */
+ reloc_howto_type *
+ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type);
+ reloc_howto_type *
+ (*reloc_name_lookup) (bfd *, const char *);
+
+
+ /* Routines used when writing an object file. */
+#define BFD_JUMP_TABLE_WRITE(NAME) \
+ NAME##_set_arch_mach, \
+ NAME##_set_section_contents
+
+ bfd_boolean (*_bfd_set_arch_mach)
+ (bfd *, enum bfd_architecture, unsigned long);
+ bfd_boolean (*_bfd_set_section_contents)
+ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type);
+
+ /* Routines used by the linker. */
+#define BFD_JUMP_TABLE_LINK(NAME) \
+ NAME##_sizeof_headers, \
+ NAME##_bfd_get_relocated_section_contents, \
+ NAME##_bfd_relax_section, \
+ NAME##_bfd_link_hash_table_create, \
+ NAME##_bfd_link_hash_table_free, \
+ NAME##_bfd_link_add_symbols, \
+ NAME##_bfd_link_just_syms, \
+ NAME##_bfd_copy_link_hash_symbol_type, \
+ NAME##_bfd_final_link, \
+ NAME##_bfd_link_split_section, \
+ NAME##_bfd_gc_sections, \
+ NAME##_bfd_lookup_section_flags, \
+ NAME##_bfd_merge_sections, \
+ NAME##_bfd_is_group_section, \
+ NAME##_bfd_discard_group, \
+ NAME##_section_already_linked, \
+ NAME##_bfd_define_common_symbol
+
+ int (*_bfd_sizeof_headers) (bfd *, struct bfd_link_info *);
+ bfd_byte * (*_bfd_get_relocated_section_contents)
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *,
+ bfd_byte *, bfd_boolean, struct bfd_symbol **);
+
+ bfd_boolean (*_bfd_relax_section)
+ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *);
+
+ /* Create a hash table for the linker. Different backends store
+ different information in this table. */
+ struct bfd_link_hash_table *
+ (*_bfd_link_hash_table_create) (bfd *);
+
+ /* Release the memory associated with the linker hash table. */
+ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *);
+
+ /* Add symbols from this object file into the hash table. */
+ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *);
+
+ /* Indicate that we are only retrieving symbol values from this section. */
+ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *);
+
+ /* Copy the symbol type of a linker hash table entry. */
+#define bfd_copy_link_hash_symbol_type(b, t, f) \
+ BFD_SEND (b, _bfd_copy_link_hash_symbol_type, (b, t, f))
+ void (*_bfd_copy_link_hash_symbol_type)
+ (bfd *, struct bfd_link_hash_entry *, struct bfd_link_hash_entry *);
+
+ /* Do a link based on the link_order structures attached to each
+ section of the BFD. */
+ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *);
+
+  /* Should this section be split up into smaller pieces during linking? */
+ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *);
+
+ /* Remove sections that are not referenced from the output. */
+ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *);
+
+ /* Sets the bitmask of allowed and disallowed section flags. */
+ bfd_boolean (*_bfd_lookup_section_flags) (struct bfd_link_info *,
+ struct flag_info *,
+ asection *);
+
+ /* Attempt to merge SEC_MERGE sections. */
+ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *);
+
+ /* Is this section a member of a group? */
+ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *);
+
+ /* Discard members of a group. */
+ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *);
+
+  /* Check if SEC has already been linked during a relocatable or
+     final link.  */
+ bfd_boolean (*_section_already_linked) (bfd *, asection *,
+ struct bfd_link_info *);
+
+ /* Define a common symbol. */
+ bfd_boolean (*_bfd_define_common_symbol) (bfd *, struct bfd_link_info *,
+ struct bfd_link_hash_entry *);
+
+ /* Routines to handle dynamic symbols and relocs. */
+#define BFD_JUMP_TABLE_DYNAMIC(NAME) \
+ NAME##_get_dynamic_symtab_upper_bound, \
+ NAME##_canonicalize_dynamic_symtab, \
+ NAME##_get_synthetic_symtab, \
+ NAME##_get_dynamic_reloc_upper_bound, \
+ NAME##_canonicalize_dynamic_reloc
+
+ /* Get the amount of memory required to hold the dynamic symbols. */
+ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *);
+ /* Read in the dynamic symbols. */
+ long (*_bfd_canonicalize_dynamic_symtab)
+ (bfd *, struct bfd_symbol **);
+  /* Create synthesized symbols. */
+ long (*_bfd_get_synthetic_symtab)
+ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **,
+ struct bfd_symbol **);
+ /* Get the amount of memory required to hold the dynamic relocs. */
+ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *);
+ /* Read in the dynamic relocs. */
+ long (*_bfd_canonicalize_dynamic_reloc)
+ (bfd *, arelent **, struct bfd_symbol **);
+
+ /* Opposite endian version of this target. */
+ const struct bfd_target * alternative_target;
+
+ /* Data for use by back-end routines, which isn't
+ generic enough to belong in this structure. */
+ const void *backend_data;
+
+} bfd_target;
+
+bfd_boolean bfd_set_default_target (const char *name);
+
+const bfd_target *bfd_find_target (const char *target_name, bfd *abfd);
+
+const bfd_target *bfd_get_target_info (const char *target_name,
+ bfd *abfd,
+ bfd_boolean *is_bigendian,
+ int *underscoring,
+ const char **def_target_arch);
+const char ** bfd_target_list (void);
+
+const bfd_target *bfd_search_for_target
+ (int (*search_func) (const bfd_target *, void *),
+ void *);
+
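+/* Illustrative sketch (editorial, not part of the original header):
+   enumerating the target vectors this copy of BFD was configured
+   with.  bfd_target_list returns a NULL-terminated vector of names
+   which the caller is expected to free.  Assumes <stdio.h>/<stdlib.h>;
+   the example_* name is ours.  */
+static void
+example_print_targets (void)
+{
+  const char **targets = bfd_target_list ();
+  const char **t;
+
+  for (t = targets; t != NULL && *t != NULL; t++)
+    printf ("%s\n", *t);
+  free ((void *) targets);
+}
+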
+/* Extracted from format.c. */
+bfd_boolean bfd_check_format (bfd *abfd, bfd_format format);
+
+bfd_boolean bfd_check_format_matches
+ (bfd *abfd, bfd_format format, char ***matching);
+
+bfd_boolean bfd_set_format (bfd *abfd, bfd_format format);
+
+const char *bfd_format_string (bfd_format format);
+
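+/* Illustrative sketch (editorial, not part of the original header):
+   checking an opened BFD's format and reporting the candidates when
+   recognition is ambiguous.  Assumes <stdio.h>/<stdlib.h>; the
+   example_* name is ours.  */
+static bfd_boolean
+example_check_object (bfd *abfd)
+{
+  char **matching;
+
+  if (bfd_check_format_matches (abfd, bfd_object, &matching))
+    return TRUE;
+
+  if (bfd_get_error () == bfd_error_file_ambiguously_recognized)
+    {
+      char **p;
+
+      for (p = matching; *p != NULL; p++)
+        fprintf (stderr, "candidate target: %s\n", *p);
+      free (matching);
+    }
+  return FALSE;
+}
+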
+/* Extracted from linker.c. */
+bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec);
+
+#define bfd_link_split_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec))
+
+bfd_boolean bfd_section_already_linked (bfd *abfd,
+ asection *sec,
+ struct bfd_link_info *info);
+
+#define bfd_section_already_linked(abfd, sec, info) \
+ BFD_SEND (abfd, _section_already_linked, (abfd, sec, info))
+
+bfd_boolean bfd_generic_define_common_symbol
+ (bfd *output_bfd, struct bfd_link_info *info,
+ struct bfd_link_hash_entry *h);
+
+#define bfd_define_common_symbol(output_bfd, info, h) \
+ BFD_SEND (output_bfd, _bfd_define_common_symbol, (output_bfd, info, h))
+
+struct bfd_elf_version_tree * bfd_find_version_for_sym
+ (struct bfd_elf_version_tree *verdefs,
+ const char *sym_name, bfd_boolean *hide);
+
+bfd_boolean bfd_hide_sym_by_version
+ (struct bfd_elf_version_tree *verdefs, const char *sym_name);
+
+/* Extracted from simple.c. */
+bfd_byte *bfd_simple_get_relocated_section_contents
+ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table);
+
+/* Extracted from compress.c. */
+bfd_boolean bfd_compress_section_contents
+ (bfd *abfd, asection *section, bfd_byte *uncompressed_buffer,
+ bfd_size_type uncompressed_size);
+
+bfd_boolean bfd_get_full_section_contents
+ (bfd *abfd, asection *section, bfd_byte **ptr);
+
+bfd_boolean bfd_is_section_compressed
+ (bfd *abfd, asection *section);
+
+bfd_boolean bfd_init_section_decompress_status
+ (bfd *abfd, asection *section);
+
+bfd_boolean bfd_init_section_compress_status
+ (bfd *abfd, asection *section);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/include/bfdlink.h b/include/bfdlink.h
new file mode 100644
index 0000000..d900b47
--- /dev/null
+++ b/include/bfdlink.h
@@ -0,0 +1,819 @@
+/* bfdlink.h -- header file for BFD link routines
+ Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+ 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+ Written by Steve Chamberlain and Ian Lance Taylor, Cygnus Support.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#ifndef BFDLINK_H
+#define BFDLINK_H
+
+/* Which symbols to strip during a link. */
+enum bfd_link_strip
+{
+ strip_none, /* Don't strip any symbols. */
+ strip_debugger, /* Strip debugging symbols. */
+ strip_some, /* keep_hash is the list of symbols to keep. */
+ strip_all /* Strip all symbols. */
+};
+
+/* Which local symbols to discard during a link. This is irrelevant
+ if strip_all is used. */
+enum bfd_link_discard
+{
+ discard_sec_merge, /* Discard local temporary symbols in SEC_MERGE
+ sections. */
+ discard_none, /* Don't discard any locals. */
+ discard_l, /* Discard local temporary symbols. */
+ discard_all /* Discard all locals. */
+};
+
+/* Describes the type of hash table entry structure being used.
+   Different hash table structures have different fields and so
+   support different linking features. */
+enum bfd_link_hash_table_type
+ {
+ bfd_link_generic_hash_table,
+ bfd_link_elf_hash_table
+ };
+
+/* These are the possible types of an entry in the BFD link hash
+ table. */
+
+enum bfd_link_hash_type
+{
+ bfd_link_hash_new, /* Symbol is new. */
+ bfd_link_hash_undefined, /* Symbol seen before, but undefined. */
+ bfd_link_hash_undefweak, /* Symbol is weak and undefined. */
+ bfd_link_hash_defined, /* Symbol is defined. */
+ bfd_link_hash_defweak, /* Symbol is weak and defined. */
+ bfd_link_hash_common, /* Symbol is common. */
+ bfd_link_hash_indirect, /* Symbol is an indirect link. */
+ bfd_link_hash_warning /* Like indirect, but warn if referenced. */
+};
+
+enum bfd_link_common_skip_ar_symbols
+{
+ bfd_link_common_skip_none,
+ bfd_link_common_skip_text,
+ bfd_link_common_skip_data,
+ bfd_link_common_skip_all
+};
+
+struct bfd_link_hash_common_entry
+ {
+ unsigned int alignment_power; /* Alignment. */
+ asection *section; /* Symbol section. */
+ };
+
+/* The linking routines use a hash table which uses this structure for
+ its elements. */
+
+struct bfd_link_hash_entry
+{
+ /* Base hash table entry structure. */
+ struct bfd_hash_entry root;
+
+ /* Type of this entry. */
+ ENUM_BITFIELD (bfd_link_hash_type) type : 8;
+
+  /* Symbol is referenced in a non-IR file.  */
+  unsigned int non_ir_ref : 1;
+
+ /* A union of information depending upon the type. */
+ union
+ {
+ /* Nothing is kept for bfd_hash_new. */
+ /* bfd_link_hash_undefined, bfd_link_hash_undefweak. */
+ struct
+ {
+ /* Undefined and common symbols are kept in a linked list through
+	 this field.  This field is present in all of the union elements
+ so that we don't need to remove entries from the list when we
+ change their type. Removing entries would either require the
+ list to be doubly linked, which would waste more memory, or
+ require a traversal. When an undefined or common symbol is
+ created, it should be added to this list, the head of which is in
+ the link hash table itself. As symbols are defined, they need
+ not be removed from the list; anything which reads the list must
+	 double-check the symbol type.
+
+ Weak symbols are not kept on this list.
+
+ Defined and defweak symbols use this field as a reference marker.
+ If the field is not NULL, or this structure is the tail of the
+ undefined symbol list, the symbol has been referenced. If the
+ symbol is undefined and becomes defined, this field will
+ automatically be non-NULL since the symbol will have been on the
+ undefined symbol list. */
+ struct bfd_link_hash_entry *next;
+ bfd *abfd; /* BFD symbol was found in. */
+ } undef;
+ /* bfd_link_hash_defined, bfd_link_hash_defweak. */
+ struct
+ {
+ struct bfd_link_hash_entry *next;
+ asection *section; /* Symbol section. */
+ bfd_vma value; /* Symbol value. */
+ } def;
+ /* bfd_link_hash_indirect, bfd_link_hash_warning. */
+ struct
+ {
+ struct bfd_link_hash_entry *next;
+ struct bfd_link_hash_entry *link; /* Real symbol. */
+ const char *warning; /* Warning (bfd_link_hash_warning only). */
+ } i;
+ /* bfd_link_hash_common. */
+ struct
+ {
+ struct bfd_link_hash_entry *next;
+ /* The linker needs to know three things about common
+ symbols: the size, the alignment, and the section in
+ which the symbol should be placed. We store the size
+ here, and we allocate a small structure to hold the
+ section and the alignment. The alignment is stored as a
+ power of two. We don't store all the information
+ directly because we don't want to increase the size of
+ the union; this structure is a major space user in the
+ linker. */
+ struct bfd_link_hash_common_entry *p;
+ bfd_size_type size; /* Common symbol size. */
+ } c;
+ } u;
+};
+
+/* This is the link hash table. It is a derived class of
+ bfd_hash_table. */
+
+struct bfd_link_hash_table
+{
+ /* The hash table itself. */
+ struct bfd_hash_table table;
+ /* A linked list of undefined and common symbols, linked through the
+ next field in the bfd_link_hash_entry structure. */
+ struct bfd_link_hash_entry *undefs;
+ /* Entries are added to the tail of the undefs list. */
+ struct bfd_link_hash_entry *undefs_tail;
+ /* The type of the link hash table. */
+ enum bfd_link_hash_table_type type;
+};
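As the comments above note, entries stay on the undefs list even after
they become defined, so any walker has to re-check the type field.  A
minimal editorial sketch (not part of the patch; count_true_undefs is a
hypothetical helper) of how a client of this table might count the
symbols that are still genuinely undefined:

/* Count entries on the undefs list whose type is still
   bfd_link_hash_undefined; the next field exists in every union
   member, so the walk stays valid even after a type changes.  */
static unsigned int
count_true_undefs (struct bfd_link_hash_table *table)
{
  struct bfd_link_hash_entry *h;
  unsigned int n = 0;

  for (h = table->undefs; h != NULL; h = h->u.undef.next)
    if (h->type == bfd_link_hash_undefined)
      ++n;
  return n;
}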
+
+/* Look up an entry in a link hash table. If FOLLOW is TRUE, this
+ follows bfd_link_hash_indirect and bfd_link_hash_warning links to
+ the real symbol. */
+extern struct bfd_link_hash_entry *bfd_link_hash_lookup
+ (struct bfd_link_hash_table *, const char *, bfd_boolean create,
+ bfd_boolean copy, bfd_boolean follow);
+
+/* Look up an entry in the main linker hash table if the symbol might
+ be wrapped. This should only be used for references to an
+ undefined symbol, not for definitions of a symbol. */
+
+extern struct bfd_link_hash_entry *bfd_wrapped_link_hash_lookup
+ (bfd *, struct bfd_link_info *, const char *, bfd_boolean,
+ bfd_boolean, bfd_boolean);
+
+/* Traverse a link hash table. */
+extern void bfd_link_hash_traverse
+ (struct bfd_link_hash_table *,
+ bfd_boolean (*) (struct bfd_link_hash_entry *, void *),
+ void *);
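For illustration, a hedged sketch of a traverse callback (print_defined
is a made-up name; assumes <stdio.h>); returning TRUE keeps the
traversal running:

/* Report every defined or weakly defined symbol to the FILE passed
   through the opaque data pointer.  */
static bfd_boolean
print_defined (struct bfd_link_hash_entry *h, void *data)
{
  if (h->type == bfd_link_hash_defined || h->type == bfd_link_hash_defweak)
    fprintf ((FILE *) data, "%s = 0x%lx\n", h->root.string,
             (unsigned long) h->u.def.value);
  return TRUE;
}

/* Typical call: bfd_link_hash_traverse (info->hash, print_defined, stderr);  */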
+
+/* Add an entry to the undefs list. */
+extern void bfd_link_add_undef
+ (struct bfd_link_hash_table *, struct bfd_link_hash_entry *);
+
+/* Remove symbols from the undefs list that don't belong there. */
+extern void bfd_link_repair_undef_list
+ (struct bfd_link_hash_table *table);
+
+/* Read symbols and cache symbol pointer array in outsymbols. */
+extern bfd_boolean bfd_generic_link_read_symbols (bfd *);
+
+struct bfd_sym_chain
+{
+ struct bfd_sym_chain *next;
+ const char *name;
+};
+
+/* How to handle unresolved symbols.
+ There are four possibilities which are enumerated below: */
+enum report_method
+{
+  /* This is the initial value when the link_info structure is created.
+     It allows the various stages of the linker to determine whether they
+     are allowed to set the value.  */
+ RM_NOT_YET_SET = 0,
+ RM_IGNORE,
+ RM_GENERATE_WARNING,
+ RM_GENERATE_ERROR
+};
+
+typedef enum {with_flags, without_flags} flag_type;
+
+/* A section flag list. */
+struct flag_info_list
+{
+ flag_type with;
+ const char *name;
+ bfd_boolean valid;
+ struct flag_info_list *next;
+};
+
+/* Section flag info. */
+struct flag_info
+{
+ flagword only_with_flags;
+ flagword not_with_flags;
+ struct flag_info_list *flag_list;
+ bfd_boolean flags_initialized;
+};
+
+struct bfd_elf_dynamic_list;
+struct bfd_elf_version_tree;
+
+/* This structure holds all the information needed to communicate
+ between BFD and the linker when doing a link. */
+
+struct bfd_link_info
+{
+ /* TRUE if BFD should generate a shared object (or a pie). */
+ unsigned int shared: 1;
+
+ /* TRUE if generating an executable, position independent or not. */
+ unsigned int executable : 1;
+
+ /* TRUE if generating a position independent executable. */
+ unsigned int pie: 1;
+
+ /* TRUE if BFD should generate a relocatable object file. */
+ unsigned int relocatable: 1;
+
+ /* TRUE if BFD should pre-bind symbols in a shared object. */
+ unsigned int symbolic: 1;
+
+ /* TRUE if executable should not contain copy relocs.
+ Setting this true may result in a non-sharable text segment. */
+ unsigned int nocopyreloc: 1;
+
+ /* TRUE if BFD should export all symbols in the dynamic symbol table
+ of an executable, rather than only those used. */
+ unsigned int export_dynamic: 1;
+
+ /* TRUE if a default symbol version should be created and used for
+ exported symbols. */
+ unsigned int create_default_symver: 1;
+
+ /* TRUE if unreferenced sections should be removed. */
+ unsigned int gc_sections: 1;
+
+ /* TRUE if every symbol should be reported back via the notice
+ callback. */
+ unsigned int notice_all: 1;
+
+ /* TRUE if we are loading LTO outputs. */
+ unsigned int loading_lto_outputs: 1;
+
+ /* TRUE if global symbols in discarded sections should be stripped. */
+ unsigned int strip_discarded: 1;
+
+ /* TRUE if all data symbols should be dynamic. */
+ unsigned int dynamic_data: 1;
+
+ /* Which symbols to strip. */
+ ENUM_BITFIELD (bfd_link_strip) strip : 2;
+
+ /* Which local symbols to discard. */
+ ENUM_BITFIELD (bfd_link_discard) discard : 2;
+
+ /* Criteria for skipping symbols when determining
+ whether to include an object from an archive. */
+ ENUM_BITFIELD (bfd_link_common_skip_ar_symbols) common_skip_ar_symbols : 2;
+
+ /* What to do with unresolved symbols in an object file.
+ When producing executables the default is GENERATE_ERROR.
+ When producing shared libraries the default is IGNORE. The
+ assumption with shared libraries is that the reference will be
+ resolved at load/execution time. */
+ ENUM_BITFIELD (report_method) unresolved_syms_in_objects : 2;
+
+ /* What to do with unresolved symbols in a shared library.
+ The same defaults apply. */
+ ENUM_BITFIELD (report_method) unresolved_syms_in_shared_libs : 2;
+
+ /* TRUE if shared objects should be linked directly, not shared. */
+ unsigned int static_link: 1;
+
+ /* TRUE if symbols should be retained in memory, FALSE if they
+ should be freed and reread. */
+ unsigned int keep_memory: 1;
+
+ /* TRUE if BFD should generate relocation information in the final
+ executable. */
+ unsigned int emitrelocations: 1;
+
+ /* TRUE if PT_GNU_RELRO segment should be created. */
+ unsigned int relro: 1;
+
+ /* TRUE if .eh_frame_hdr section and PT_GNU_EH_FRAME ELF segment
+ should be created. */
+ unsigned int eh_frame_hdr: 1;
+
+ /* TRUE if we should warn when adding a DT_TEXTREL to a shared object. */
+ unsigned int warn_shared_textrel: 1;
+
+ /* TRUE if we should error when adding a DT_TEXTREL. */
+ unsigned int error_textrel: 1;
+
+ /* TRUE if .hash section should be created. */
+ unsigned int emit_hash: 1;
+
+ /* TRUE if .gnu.hash section should be created. */
+ unsigned int emit_gnu_hash: 1;
+
+ /* If TRUE reduce memory overheads, at the expense of speed. This will
+ cause map file generation to use an O(N^2) algorithm and disable
+     caching of the ELF symbol buffer.  */
+ unsigned int reduce_memory_overheads: 1;
+
+ /* TRUE if the output file should be in a traditional format. This
+ is equivalent to the setting of the BFD_TRADITIONAL_FORMAT flag
+ on the output file, but may be checked when reading the input
+ files. */
+ unsigned int traditional_format: 1;
+
+ /* TRUE if non-PLT relocs should be merged into one reloc section
+ and sorted so that relocs against the same symbol come together. */
+ unsigned int combreloc: 1;
+
+ /* TRUE if a default symbol version should be created and used for
+ imported symbols. */
+ unsigned int default_imported_symver: 1;
+
+ /* TRUE if the new ELF dynamic tags are enabled. */
+ unsigned int new_dtags: 1;
+
+ /* FALSE if .eh_frame unwind info should be generated for PLT and other
+ linker created sections, TRUE if it should be omitted. */
+ unsigned int no_ld_generated_unwind_info: 1;
+
+ /* TRUE if BFD should generate a "task linked" object file,
+ similar to relocatable but also with globals converted to
+ statics. */
+ unsigned int task_link: 1;
+
+  /* TRUE if ok to have multiple definitions.  */
+ unsigned int allow_multiple_definition: 1;
+
+  /* TRUE if ok to have a version with no definition.  */
+ unsigned int allow_undefined_version: 1;
+
+ /* TRUE if some symbols have to be dynamic, controlled by
+ --dynamic-list command line options. */
+ unsigned int dynamic: 1;
+
+ /* TRUE if PT_GNU_STACK segment should be created with PF_R|PF_W|PF_X
+ flags. */
+ unsigned int execstack: 1;
+
+ /* TRUE if PT_GNU_STACK segment should be created with PF_R|PF_W
+ flags. */
+ unsigned int noexecstack: 1;
+
+  /* TRUE if we want to produce optimized output files.  This might
+ need much more time and therefore must be explicitly selected. */
+ unsigned int optimize: 1;
+
+ /* TRUE if user should be informed of removed unreferenced sections. */
+ unsigned int print_gc_sections: 1;
+
+  /* TRUE if we should warn about alternate ELF machine code.  */
+ unsigned int warn_alternate_em: 1;
+
+ /* TRUE if the linker script contained an explicit PHDRS command. */
+ unsigned int user_phdrs: 1;
+
+ /* Char that may appear as the first char of a symbol, but should be
+ skipped (like symbol_leading_char) when looking up symbols in
+ wrap_hash. Used by PowerPC Linux for 'dot' symbols. */
+ char wrap_char;
+
+ /* Separator between archive and filename in linker script filespecs. */
+ char path_separator;
+
+ /* Function callbacks. */
+ const struct bfd_link_callbacks *callbacks;
+
+ /* Hash table handled by BFD. */
+ struct bfd_link_hash_table *hash;
+
+ /* Hash table of symbols to keep. This is NULL unless strip is
+ strip_some. */
+ struct bfd_hash_table *keep_hash;
+
+ /* Hash table of symbols to report back via the notice callback. If
+ this is NULL, and notice_all is FALSE, then no symbols are
+ reported back. */
+ struct bfd_hash_table *notice_hash;
+
+ /* Hash table of symbols which are being wrapped (the --wrap linker
+ option). If this is NULL, no symbols are being wrapped. */
+ struct bfd_hash_table *wrap_hash;
+
+ /* The output BFD. */
+ bfd *output_bfd;
+
+  /* The list of input BFDs involved in the link.  These are chained
+ together via the link_next field. */
+ bfd *input_bfds;
+ bfd **input_bfds_tail;
+
+ /* Non-NULL if .note.gnu.build-id section should be created. */
+ char *emit_note_gnu_build_id;
+
+  /* If a symbol should be created for each input BFD, this is the section
+ where those symbols should be placed. It must be a section in
+ the output BFD. It may be NULL, in which case no such symbols
+ will be created. This is to support CREATE_OBJECT_SYMBOLS in the
+ linker command language. */
+ asection *create_object_symbols_section;
+
+ /* List of global symbol names that are starting points for marking
+ sections against garbage collection. */
+ struct bfd_sym_chain *gc_sym_list;
+
+  /* If a base output file is wanted, then this points to it.  */
+ void *base_file;
+
+ /* The function to call when the executable or shared object is
+ loaded. */
+ const char *init_function;
+
+ /* The function to call when the executable or shared object is
+ unloaded. */
+ const char *fini_function;
+
+ /* Number of relaxation passes. Usually only one relaxation pass
+ is needed. But a backend can have as many relaxation passes as
+ necessary. During bfd_relax_section call, it is set to the
+ current pass, starting from 0. */
+ int relax_pass;
+
+ /* Number of relaxation trips. This number is incremented every
+ time the relaxation pass is restarted due to a previous
+ relaxation returning true in *AGAIN. */
+ int relax_trip;
+
+ /* Non-zero if auto-import thunks for DATA items in pei386 DLLs
+ should be generated/linked against. Set to 1 if this feature
+ is explicitly requested by the user, -1 if enabled by default. */
+ int pei386_auto_import;
+
+ /* Non-zero if runtime relocs for DATA items with non-zero addends
+ in pei386 DLLs should be generated. Set to 1 if this feature
+ is explicitly requested by the user, -1 if enabled by default. */
+ int pei386_runtime_pseudo_reloc;
+
+ /* How many spare .dynamic DT_NULL entries should be added? */
+ unsigned int spare_dynamic_tags;
+
+ /* May be used to set DT_FLAGS for ELF. */
+ bfd_vma flags;
+
+ /* May be used to set DT_FLAGS_1 for ELF. */
+ bfd_vma flags_1;
+
+ /* Start and end of RELRO region. */
+ bfd_vma relro_start, relro_end;
+
+  /* List of symbols that should be dynamic.  */
+ struct bfd_elf_dynamic_list *dynamic_list;
+
+ /* The version information. */
+ struct bfd_elf_version_tree *version_info;
+};
+
+/* This structure holds a set of callback functions.  These are called
+ by the BFD linker routines. Except for the info functions, the first
+ argument to each callback function is the bfd_link_info structure
+ being used and each function returns a boolean value. If the
+ function returns FALSE, then the BFD function which called it should
+ return with a failure indication. */
+
+struct bfd_link_callbacks
+{
+ /* A function which is called when an object is added from an
+ archive. ABFD is the archive element being added. NAME is the
+ name of the symbol which caused the archive element to be pulled
+ in. This function may set *SUBSBFD to point to an alternative
+ BFD from which symbols should in fact be added in place of the
+ original BFD's symbols. */
+ bfd_boolean (*add_archive_element)
+ (struct bfd_link_info *, bfd *abfd, const char *name, bfd **subsbfd);
+ /* A function which is called when a symbol is found with multiple
+ definitions. H is the symbol which is defined multiple times.
+ NBFD is the new BFD, NSEC is the new section, and NVAL is the new
+ value. NSEC may be bfd_com_section or bfd_ind_section. */
+ bfd_boolean (*multiple_definition)
+ (struct bfd_link_info *, struct bfd_link_hash_entry *h,
+ bfd *nbfd, asection *nsec, bfd_vma nval);
+ /* A function which is called when a common symbol is defined
+ multiple times. H is the symbol appearing multiple times.
+ NBFD is the BFD of the new symbol. NTYPE is the type of the new
+ symbol, one of bfd_link_hash_defined, bfd_link_hash_common, or
+ bfd_link_hash_indirect. If NTYPE is bfd_link_hash_common, NSIZE
+ is the size of the new symbol. */
+ bfd_boolean (*multiple_common)
+ (struct bfd_link_info *, struct bfd_link_hash_entry *h,
+ bfd *nbfd, enum bfd_link_hash_type ntype, bfd_vma nsize);
+ /* A function which is called to add a symbol to a set. ENTRY is
+ the link hash table entry for the set itself (e.g.,
+ __CTOR_LIST__). RELOC is the relocation to use for an entry in
+ the set when generating a relocatable file, and is also used to
+ get the size of the entry when generating an executable file.
+ ABFD, SEC and VALUE identify the value to add to the set. */
+ bfd_boolean (*add_to_set)
+ (struct bfd_link_info *, struct bfd_link_hash_entry *entry,
+ bfd_reloc_code_real_type reloc, bfd *abfd, asection *sec, bfd_vma value);
+ /* A function which is called when the name of a g++ constructor or
+ destructor is found. This is only called by some object file
+ formats. CONSTRUCTOR is TRUE for a constructor, FALSE for a
+ destructor. This will use BFD_RELOC_CTOR when generating a
+ relocatable file. NAME is the name of the symbol found. ABFD,
+ SECTION and VALUE are the value of the symbol. */
+ bfd_boolean (*constructor)
+ (struct bfd_link_info *, bfd_boolean constructor, const char *name,
+ bfd *abfd, asection *sec, bfd_vma value);
+ /* A function which is called to issue a linker warning. For
+ example, this is called when there is a reference to a warning
+ symbol. WARNING is the warning to be issued. SYMBOL is the name
+ of the symbol which triggered the warning; it may be NULL if
+ there is none. ABFD, SECTION and ADDRESS identify the location
+     which triggered the warning; either ABFD or SECTION or both may
+ be NULL if the location is not known. */
+ bfd_boolean (*warning)
+ (struct bfd_link_info *, const char *warning, const char *symbol,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a relocation is attempted against
+ an undefined symbol. NAME is the symbol which is undefined.
+ ABFD, SECTION and ADDRESS identify the location from which the
+ reference is made. IS_FATAL indicates whether an undefined symbol is
+ a fatal error or not. In some cases SECTION may be NULL. */
+ bfd_boolean (*undefined_symbol)
+ (struct bfd_link_info *, const char *name, bfd *abfd,
+ asection *section, bfd_vma address, bfd_boolean is_fatal);
+ /* A function which is called when a reloc overflow occurs. ENTRY is
+ the link hash table entry for the symbol the reloc is against.
+ NAME is the name of the local symbol or section the reloc is
+ against, RELOC_NAME is the name of the relocation, and ADDEND is
+ any addend that is used. ABFD, SECTION and ADDRESS identify the
+ location at which the overflow occurs; if this is the result of a
+ bfd_section_reloc_link_order or bfd_symbol_reloc_link_order, then
+ ABFD will be NULL. */
+ bfd_boolean (*reloc_overflow)
+ (struct bfd_link_info *, struct bfd_link_hash_entry *entry,
+ const char *name, const char *reloc_name, bfd_vma addend,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a dangerous reloc is performed.
+ MESSAGE is an appropriate message.
+ ABFD, SECTION and ADDRESS identify the location at which the
+ problem occurred; if this is the result of a
+ bfd_section_reloc_link_order or bfd_symbol_reloc_link_order, then
+ ABFD will be NULL. */
+ bfd_boolean (*reloc_dangerous)
+ (struct bfd_link_info *, const char *message,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a reloc is found to be attached
+ to a symbol which is not being written out. NAME is the name of
+ the symbol. ABFD, SECTION and ADDRESS identify the location of
+ the reloc; if this is the result of a
+ bfd_section_reloc_link_order or bfd_symbol_reloc_link_order, then
+ ABFD will be NULL. */
+ bfd_boolean (*unattached_reloc)
+ (struct bfd_link_info *, const char *name,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a symbol in notice_hash is
+ defined or referenced. H is the symbol. ABFD, SECTION and
+ ADDRESS are the (new) value of the symbol. If SECTION is
+ bfd_und_section, this is a reference. FLAGS are the symbol
+ BSF_* flags. STRING is the name of the symbol to indirect to if
+ the sym is indirect, or the warning string if a warning sym. */
+ bfd_boolean (*notice)
+ (struct bfd_link_info *, struct bfd_link_hash_entry *h,
+ bfd *abfd, asection *section, bfd_vma address, flagword flags,
+ const char *string);
+ /* Error or warning link info message. */
+ void (*einfo)
+ (const char *fmt, ...);
+ /* General link info message. */
+ void (*info)
+ (const char *fmt, ...);
+ /* Message to be printed in linker map file. */
+ void (*minfo)
+ (const char *fmt, ...);
+ /* This callback provides a chance for users of the BFD library to
+ override its decision about whether to place two adjacent sections
+ into the same segment. */
+ bfd_boolean (*override_segment_assignment)
+ (struct bfd_link_info *, bfd * abfd,
+ asection * current_section, asection * previous_section,
+ bfd_boolean new_segment);
+};
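A front end wires these hooks up before driving the link.  As a hedged
editorial sketch (my_warning is a hypothetical name, not from this
patch; assumes <stdio.h>), a minimal warning callback could look like:

/* Print the warning and keep going; returning FALSE would make the
   calling BFD routine report failure to its caller.  */
static bfd_boolean
my_warning (struct bfd_link_info *info, const char *warning,
            const char *symbol, bfd *abfd, asection *section,
            bfd_vma address)
{
  /* ABFD, SECTION and ADDRESS may be NULL/0 when the location of the
     warning is not known.  */
  (void) info; (void) abfd; (void) section; (void) address;
  if (symbol != NULL)
    fprintf (stderr, "warning: %s: %s\n", symbol, warning);
  else
    fprintf (stderr, "warning: %s\n", warning);
  return TRUE;
}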
+
+/* The linker builds link_order structures which tell the code how to
+ include input data in the output file. */
+
+/* These are the types of link_order structures. */
+
+enum bfd_link_order_type
+{
+ bfd_undefined_link_order, /* Undefined. */
+ bfd_indirect_link_order, /* Built from a section. */
+ bfd_data_link_order, /* Set to explicit data. */
+ bfd_section_reloc_link_order, /* Relocate against a section. */
+ bfd_symbol_reloc_link_order /* Relocate against a symbol. */
+};
+
+/* This is the link_order structure itself. These form a chain
+ attached to the output section whose contents they are describing. */
+
+struct bfd_link_order
+{
+ /* Next link_order in chain. */
+ struct bfd_link_order *next;
+ /* Type of link_order. */
+ enum bfd_link_order_type type;
+ /* Offset within output section. */
+ bfd_vma offset;
+ /* Size within output section. */
+ bfd_size_type size;
+ /* Type specific information. */
+ union
+ {
+ struct
+ {
+ /* Section to include. If this is used, then
+ section->output_section must be the section the
+ link_order is attached to, section->output_offset must
+ equal the link_order offset field, and section->size
+ must equal the link_order size field. Maybe these
+ restrictions should be relaxed someday. */
+ asection *section;
+ } indirect;
+ struct
+ {
+ /* Size of contents, or zero when contents should be filled by
+ the architecture-dependent fill function.
+ A non-zero value allows filling of the output section
+ with an arbitrary repeated pattern. */
+ unsigned int size;
+ /* Data to put into file. */
+ bfd_byte *contents;
+ } data;
+ struct
+ {
+ /* Description of reloc to generate. Used for
+ bfd_section_reloc_link_order and
+ bfd_symbol_reloc_link_order. */
+ struct bfd_link_order_reloc *p;
+ } reloc;
+ } u;
+};
+
+/* A linker order of type bfd_section_reloc_link_order or
+ bfd_symbol_reloc_link_order means to create a reloc against a
+ section or symbol, respectively. This is used to implement -Ur to
+ generate relocs for the constructor tables. The
+ bfd_link_order_reloc structure describes the reloc that BFD should
+   create.  It is similar to an arelent, but I didn't use arelent
+ because the linker does not know anything about most symbols, and
+ any asymbol structure it creates will be partially meaningless.
+ This information could logically be in the bfd_link_order struct,
+ but I didn't want to waste the space since these types of relocs
+ are relatively rare. */
+
+struct bfd_link_order_reloc
+{
+ /* Reloc type. */
+ bfd_reloc_code_real_type reloc;
+
+ union
+ {
+ /* For type bfd_section_reloc_link_order, this is the section
+ the reloc should be against. This must be a section in the
+ output BFD, not any of the input BFDs. */
+ asection *section;
+ /* For type bfd_symbol_reloc_link_order, this is the name of the
+ symbol the reloc should be against. */
+ const char *name;
+ } u;
+
+ /* Addend to use. The object file should contain zero. The BFD
+ backend is responsible for filling in the contents of the object
+ file correctly. For some object file formats (e.g., COFF) the
+   addend must be stored into the object file, and for some
+ (e.g., SPARC a.out) it is kept in the reloc. */
+ bfd_vma addend;
+};
+
+/* Allocate a new link_order for a section. */
+extern struct bfd_link_order *bfd_new_link_order (bfd *, asection *);
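To make the shape of these structures concrete, a speculative editorial
sketch (add_fill_order is an invented helper; real front ends build
link_orders in ldwrite.c-style code) attaching an explicit-data
link_order that writes four literal bytes at the start of an output
section:

/* Attach a bfd_data_link_order carrying four literal bytes at offset
   0 of SEC in OUTPUT_BFD.  CONTENTS must stay live until the link is
   written out.  */
static bfd_boolean
add_fill_order (bfd *output_bfd, asection *sec, bfd_byte *contents)
{
  struct bfd_link_order *lo = bfd_new_link_order (output_bfd, sec);

  if (lo == NULL)
    return FALSE;
  lo->type = bfd_data_link_order;
  lo->offset = 0;
  lo->size = 4;                /* bytes occupied in the output section */
  lo->u.data.size = 4;         /* non-zero: literal contents, not fill */
  lo->u.data.contents = contents;
  return TRUE;
}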
+
+/* These structures are used to describe version information for the
+ ELF linker. These structures could be manipulated entirely inside
+ BFD, but it would be a pain. Instead, the regular linker sets up
+ these structures, and then passes them into BFD. */
+
+/* Glob pattern for a version. */
+
+struct bfd_elf_version_expr
+{
+ /* Next glob pattern for this version. */
+ struct bfd_elf_version_expr *next;
+ /* Glob pattern. */
+ const char *pattern;
+ /* Set if pattern is not a glob. */
+ unsigned int literal : 1;
+ /* Defined by ".symver". */
+ unsigned int symver : 1;
+ /* Defined by version script. */
+ unsigned int script : 1;
+ /* Pattern type. */
+#define BFD_ELF_VERSION_C_TYPE 1
+#define BFD_ELF_VERSION_CXX_TYPE 2
+#define BFD_ELF_VERSION_JAVA_TYPE 4
+ unsigned int mask : 3;
+};
+
+struct bfd_elf_version_expr_head
+{
+ /* List of all patterns, both wildcards and non-wildcards. */
+ struct bfd_elf_version_expr *list;
+ /* Hash table for non-wildcards. */
+ void *htab;
+ /* Remaining patterns. */
+ struct bfd_elf_version_expr *remaining;
+ /* What kind of pattern types are present in list (bitmask). */
+ unsigned int mask;
+};
+
+/* Version dependencies. */
+
+struct bfd_elf_version_deps
+{
+ /* Next dependency for this version. */
+ struct bfd_elf_version_deps *next;
+ /* The version which this version depends upon. */
+ struct bfd_elf_version_tree *version_needed;
+};
+
+/* A node in the version tree. */
+
+struct bfd_elf_version_tree
+{
+ /* Next version. */
+ struct bfd_elf_version_tree *next;
+ /* Name of this version. */
+ const char *name;
+ /* Version number. */
+ unsigned int vernum;
+ /* Regular expressions for global symbols in this version. */
+ struct bfd_elf_version_expr_head globals;
+ /* Regular expressions for local symbols in this version. */
+ struct bfd_elf_version_expr_head locals;
+ /* List of versions which this version depends upon. */
+ struct bfd_elf_version_deps *deps;
+ /* Index of the version name. This is used within BFD. */
+ unsigned int name_indx;
+ /* Whether this version tree was used. This is used within BFD. */
+ int used;
+ /* Matching hook. */
+ struct bfd_elf_version_expr *(*match)
+ (struct bfd_elf_version_expr_head *head,
+ struct bfd_elf_version_expr *prev, const char *sym);
+};
+
+struct bfd_elf_dynamic_list
+{
+ struct bfd_elf_version_expr_head head;
+ struct bfd_elf_version_expr *(*match)
+ (struct bfd_elf_version_expr_head *head,
+ struct bfd_elf_version_expr *prev, const char *sym);
+};
+
+#endif
diff --git a/include/symcat.h b/include/symcat.h
new file mode 100644
index 0000000..b461287
--- /dev/null
+++ b/include/symcat.h
@@ -0,0 +1,55 @@
+/* Symbol concatenation utilities.
+
+ Copyright (C) 1998, 2000, 2010 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef SYM_CAT_H
+#define SYM_CAT_H
+
+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
+#define CONCAT2(a,b) a##b
+#define CONCAT3(a,b,c) a##b##c
+#define CONCAT4(a,b,c,d) a##b##c##d
+#define CONCAT5(a,b,c,d,e) a##b##c##d##e
+#define CONCAT6(a,b,c,d,e,f) a##b##c##d##e##f
+#define STRINGX(s) #s
+#else
+/* Note one should never pass extra whitespace to the CONCATn macros,
+   e.g. CONCAT2(foo, bar) because traditional C will keep the space between
+ the two labels instead of concatenating them. Instead, make sure to
+ write CONCAT2(foo,bar). */
+#define CONCAT2(a,b) a/**/b
+#define CONCAT3(a,b,c) a/**/b/**/c
+#define CONCAT4(a,b,c,d) a/**/b/**/c/**/d
+#define CONCAT5(a,b,c,d,e) a/**/b/**/c/**/d/**/e
+#define CONCAT6(a,b,c,d,e,f) a/**/b/**/c/**/d/**/e/**/f
+#define STRINGX(s) "s"
+#endif
+
+#define XCONCAT2(a,b) CONCAT2(a,b)
+#define XCONCAT3(a,b,c) CONCAT3(a,b,c)
+#define XCONCAT4(a,b,c,d) CONCAT4(a,b,c,d)
+#define XCONCAT5(a,b,c,d,e) CONCAT5(a,b,c,d,e)
+#define XCONCAT6(a,b,c,d,e,f) CONCAT6(a,b,c,d,e,f)
+
+/* Note the layer of indirection here is typically used to allow
+   stringification of the expansion of macros.  I.e., given "#define foo
+   bar", "XSTRING(foo)" yields "bar".  Be aware that this only
+ works for __STDC__, not for traditional C which will still resolve
+ to "foo". */
+#define XSTRING(s) STRINGX(s)
+
+#endif /* SYM_CAT_H */
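A short editorial example (standalone, not part of the header) showing
why the X-prefixed wrappers exist: they expand their arguments one more
time before pasting or stringizing.

#include "symcat.h"

#define PREFIX  sym_
#define VERSION 42

/* XCONCAT2 expands PREFIX first, yielding the token sym_table;
   CONCAT2 (PREFIX, table) would paste literally into PREFIXtable.  */
int XCONCAT2 (PREFIX, table)[4];

/* XSTRING expands first, then stringizes: "42", not "VERSION".  */
static const char *version_string = XSTRING (VERSION);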
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbegin.o
new file mode 100644
index 0000000..12b8d27
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginS.o
new file mode 100644
index 0000000..05d15d2
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginT.o
new file mode 100644
index 0000000..12b8d27
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtend.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtend.o
new file mode 100644
index 0000000..4ca8b96
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtendS.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtendS.o
new file mode 100644
index 0000000..4ca8b96
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcc.a b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcc.a
new file mode 100644
index 0000000..27e55d3
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcov.a b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcov.a
new file mode 100644
index 0000000..dcbced3
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbegin.o
new file mode 100644
index 0000000..3ffa20e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginS.o
new file mode 100644
index 0000000..2069754
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginT.o
new file mode 100644
index 0000000..3ffa20e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtend.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtend.o
new file mode 100644
index 0000000..79e9f19
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtendS.o b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtendS.o
new file mode 100644
index 0000000..79e9f19
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcc.a b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcc.a
new file mode 100644
index 0000000..91f4956
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcov.a b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcov.a
new file mode 100644
index 0000000..257feef
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/armv7-a/thumb/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.7/crtbegin.o
new file mode 100644
index 0000000..2b59ffe
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.7/crtbeginS.o
new file mode 100644
index 0000000..97ca32e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.7/crtbeginT.o
new file mode 100644
index 0000000..2b59ffe
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/crtend.o b/lib/gcc/arm-linux-androideabi/4.7/crtend.o
new file mode 100644
index 0000000..38858b1
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/crtendS.o b/lib/gcc/arm-linux-androideabi/4.7/crtendS.o
new file mode 100644
index 0000000..38858b1
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include-fixed/README b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/README
new file mode 100644
index 0000000..7086a77
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/README
@@ -0,0 +1,14 @@
+This README file is copied into the directory for GCC-only header files
+when fixincludes is run by the makefile for GCC.
+
+Many of the files in this directory were automatically edited from the
+standard system header files by the fixincludes process. They are
+system-specific, and will not work on any other kind of system. They
+are also not part of GCC.  The reason we have to do this is that
+GCC requires ANSI C headers and many vendors supply ANSI-incompatible
+headers.
+
+Because this is an automated process, sometimes headers get "fixed"
+that do not, strictly speaking, need a fix. As long as nothing is broken
+by the process, it is just an unfortunate collateral inconvenience.
+We would like to rectify it, if it is not "too inconvenient".
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include-fixed/limits.h b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/limits.h
new file mode 100644
index 0000000..a1c35e9
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/limits.h
@@ -0,0 +1,172 @@
+/* Copyright (C) 1992, 1994, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This administrivia gets added to the beginning of limits.h
+ if the system has its own version of limits.h. */
+
+/* We use _GCC_LIMITS_H_ because we want this not to match
+ any macros that the system's limits.h uses for its own purposes. */
+#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */
+#define _GCC_LIMITS_H_
+
+#ifndef _LIBC_LIMITS_H_
+/* Use "..." so that we find syslimits.h only in this same directory. */
+#include "syslimits.h"
+#endif
+/* Copyright (C) 1991, 1992, 1993, 1996, 1997, 1998, 1999, 2000, 2001,
+ 2002 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _LIMITS_H___
+#define _LIMITS_H___
+
+/* Number of bits in a `char'. */
+#undef CHAR_BIT
+#define CHAR_BIT __CHAR_BIT__
+
+/* Maximum length of a multibyte character. */
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+/* Minimum and maximum values a `signed char' can hold. */
+#undef SCHAR_MIN
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#undef SCHAR_MAX
+#define SCHAR_MAX __SCHAR_MAX__
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0). */
+#undef UCHAR_MAX
+#if __SCHAR_MAX__ == __INT_MAX__
+# define UCHAR_MAX (SCHAR_MAX * 2U + 1U)
+#else
+# define UCHAR_MAX (SCHAR_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+# undef CHAR_MIN
+# if __SCHAR_MAX__ == __INT_MAX__
+# define CHAR_MIN 0U
+# else
+# define CHAR_MIN 0
+# endif
+# undef CHAR_MAX
+# define CHAR_MAX UCHAR_MAX
+#else
+# undef CHAR_MIN
+# define CHAR_MIN SCHAR_MIN
+# undef CHAR_MAX
+# define CHAR_MAX SCHAR_MAX
+#endif
+
+/* Minimum and maximum values a `signed short int' can hold. */
+#undef SHRT_MIN
+#define SHRT_MIN (-SHRT_MAX - 1)
+#undef SHRT_MAX
+#define SHRT_MAX __SHRT_MAX__
+
+/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */
+#undef USHRT_MAX
+#if __SHRT_MAX__ == __INT_MAX__
+# define USHRT_MAX (SHRT_MAX * 2U + 1U)
+#else
+# define USHRT_MAX (SHRT_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `signed int' can hold. */
+#undef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#undef INT_MAX
+#define INT_MAX __INT_MAX__
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0). */
+#undef UINT_MAX
+#define UINT_MAX (INT_MAX * 2U + 1U)
+
+/* Minimum and maximum values a `signed long int' can hold.
+ (Same as `int'). */
+#undef LONG_MIN
+#define LONG_MIN (-LONG_MAX - 1L)
+#undef LONG_MAX
+#define LONG_MAX __LONG_MAX__
+
+/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */
+#undef ULONG_MAX
+#define ULONG_MAX (LONG_MAX * 2UL + 1UL)
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LLONG_MIN
+# define LLONG_MIN (-LLONG_MAX - 1LL)
+# undef LLONG_MAX
+# define LLONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULLONG_MAX
+# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__)
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LONG_LONG_MIN
+# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL)
+# undef LONG_LONG_MAX
+# define LONG_LONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULONG_LONG_MAX
+# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL)
+#endif
+
+#endif /* _LIMITS_H___ */
+/* This administrivia gets added to the end of limits.h
+ if the system has its own version of limits.h. */
+
+#else /* not _GCC_LIMITS_H_ */
+
+#ifdef _GCC_NEXT_LIMITS_H
+#include_next <limits.h> /* recurse down to the real one */
+#endif
+
+#endif /* not _GCC_LIMITS_H_ */
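The MAX values above are derived rather than hard-coded, e.g.
UCHAR_MAX = SCHAR_MAX * 2 + 1.  A standalone editorial check (not part
of the header) of that arithmetic on this target:

#include <limits.h>
#include <stdio.h>

int main (void)
{
  /* With CHAR_BIT == 8: SCHAR_MAX = 127, so UCHAR_MAX = 127 * 2 + 1.  */
  printf ("CHAR_BIT=%d SCHAR_MAX=%d UCHAR_MAX=%u\n",
          CHAR_BIT, SCHAR_MAX, (unsigned) UCHAR_MAX);
  /* CHAR_MIN/CHAR_MAX track __CHAR_UNSIGNED__; on ARM EABI plain char
     is typically unsigned, so CHAR_MIN is 0 and CHAR_MAX is 255.  */
  printf ("CHAR_MIN=%d CHAR_MAX=%d\n", CHAR_MIN, CHAR_MAX);
  return 0;
}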
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include-fixed/linux/a.out.h b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/linux/a.out.h
new file mode 100644
index 0000000..306398e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/linux/a.out.h
@@ -0,0 +1,229 @@
+/* DO NOT EDIT THIS FILE.
+
+ It has been auto-edited by fixincludes from:
+
+ "/Volumes/android/toolchain-build-4.7/prefix/sysroot/usr/include/linux/a.out.h"
+
+ This had to be done to correct non-standard usages in the
+ original, manufacturer supplied header file. */
+
+/****************************************************************************
+ ****************************************************************************
+ ***
+ *** This header was automatically generated from a Linux kernel header
+ *** of the same name, to make information necessary for userspace to
+ *** call into the kernel available to libc. It contains only constants,
+ *** structures, and macros generated from the original header, and thus,
+ *** contains no copyrightable information.
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef __A_OUT_GNU_H__
+#define __A_OUT_GNU_H__
+
+#define __GNU_EXEC_MACROS__
+
+#ifndef __STRUCT_EXEC_OVERRIDE__
+
+#include <asm/a.out.h>
+
+#endif
+
+enum machine_type {
+#ifdef M_OLDSUN2
+ M__OLDSUN2 = M_OLDSUN2,
+#else
+ M_OLDSUN2 = 0,
+#endif
+#ifdef M_68010
+ M__68010 = M_68010,
+#else
+ M_68010 = 1,
+#endif
+#ifdef M_68020
+ M__68020 = M_68020,
+#else
+ M_68020 = 2,
+#endif
+#ifdef M_SPARC
+ M__SPARC = M_SPARC,
+#else
+ M_SPARC = 3,
+#endif
+
+ M_386 = 100,
+ M_MIPS1 = 151,
+ M_MIPS2 = 152
+};
+
+#ifndef N_MAGIC
+#define N_MAGIC(exec) ((exec).a_info & 0xffff)
+#endif
+#define N_MACHTYPE(exec) ((enum machine_type)(((exec).a_info >> 16) & 0xff))
+#define N_FLAGS(exec) (((exec).a_info >> 24) & 0xff)
+#define N_SET_INFO(exec, magic, type, flags) ((exec).a_info = ((magic) & 0xffff) | (((int)(type) & 0xff) << 16) | (((flags) & 0xff) << 24))
+#define N_SET_MAGIC(exec, magic) ((exec).a_info = (((exec).a_info & 0xffff0000) | ((magic) & 0xffff)))
+
+#define N_SET_MACHTYPE(exec, machtype) ((exec).a_info = ((exec).a_info&0xff00ffff) | ((((int)(machtype))&0xff) << 16))
+
+#define N_SET_FLAGS(exec, flags) ((exec).a_info = ((exec).a_info&0x00ffffff) | (((flags) & 0xff) << 24))
+
+#define OMAGIC 0407
+
+#define NMAGIC 0410
+
+#define ZMAGIC 0413
+
+#define QMAGIC 0314
+
+#define CMAGIC 0421
+
+#ifndef N_BADMAG
+#define N_BADMAG(x) (N_MAGIC(x) != OMAGIC && N_MAGIC(x) != NMAGIC && N_MAGIC(x) != ZMAGIC && N_MAGIC(x) != QMAGIC)
+#endif
+
+#define _N_HDROFF(x) (1024 - sizeof (struct exec))
+
+#ifndef N_TXTOFF
+#define N_TXTOFF(x) (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : (N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec)))
+#endif
+
+#ifndef N_DATOFF
+#define N_DATOFF(x) (N_TXTOFF(x) + (x).a_text)
+#endif
+
+#ifndef N_TRELOFF
+#define N_TRELOFF(x) (N_DATOFF(x) + (x).a_data)
+#endif
+
+#ifndef N_DRELOFF
+#define N_DRELOFF(x) (N_TRELOFF(x) + N_TRSIZE(x))
+#endif
+
+#ifndef N_SYMOFF
+#define N_SYMOFF(x) (N_DRELOFF(x) + N_DRSIZE(x))
+#endif
+
+#ifndef N_STROFF
+#define N_STROFF(x) (N_SYMOFF(x) + N_SYMSIZE(x))
+#endif
+
+#ifndef N_TXTADDR
+#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0)
+#endif
+
+#if defined(vax) || defined(hp300) || defined(pyr)
+#define SEGMENT_SIZE page_size
+#endif
+#ifdef sony
+#define SEGMENT_SIZE 0x2000
+#endif
+#ifdef is68k
+#define SEGMENT_SIZE 0x20000
+#endif
+#if defined(m68k) && defined(PORTAR)
+#define PAGE_SIZE 0x400
+#define SEGMENT_SIZE PAGE_SIZE
+#endif
+
+#ifdef __linux__
+#include <asm/page.h>
+#if defined(__i386__) || defined(__mc68000__)
+#define SEGMENT_SIZE 1024
+#else
+#ifndef SEGMENT_SIZE
+#define SEGMENT_SIZE PAGE_SIZE
+#endif
+#endif
+#endif
+
+#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE)
+
+#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text)
+
+#ifndef N_DATADDR
+#define N_DATADDR(x) (N_MAGIC(x)==OMAGIC? (_N_TXTENDADDR(x)) : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x))))
+#endif
+
+#ifndef N_BSSADDR
+#define N_BSSADDR(x) (N_DATADDR(x) + (x).a_data)
+#endif
+
+#ifndef N_NLIST_DECLARED
+struct nlist {
+ union {
+ char *n_name;
+ struct nlist *n_next;
+ long n_strx;
+ } n_un;
+ unsigned char n_type;
+ char n_other;
+ short n_desc;
+ unsigned long n_value;
+};
+#endif
+
+#ifndef N_UNDF
+#define N_UNDF 0
+#endif
+#ifndef N_ABS
+#define N_ABS 2
+#endif
+#ifndef N_TEXT
+#define N_TEXT 4
+#endif
+#ifndef N_DATA
+#define N_DATA 6
+#endif
+#ifndef N_BSS
+#define N_BSS 8
+#endif
+#ifndef N_FN
+#define N_FN 15
+#endif
+
+#ifndef N_EXT
+#define N_EXT 1
+#endif
+#ifndef N_TYPE
+#define N_TYPE 036
+#endif
+#ifndef N_STAB
+#define N_STAB 0340
+#endif
+
+#define N_INDR 0xa
+
+#define N_SETA 0x14
+#define N_SETT 0x16
+#define N_SETD 0x18
+#define N_SETB 0x1A
+
+#define N_SETV 0x1C
+
+#ifndef N_RELOCATION_INFO_DECLARED
+
+struct relocation_info
+{
+
+ int r_address;
+
+ unsigned int r_symbolnum:24;
+
+ unsigned int r_pcrel:1;
+
+ unsigned int r_length:2;
+
+ unsigned int r_extern:1;
+
+#ifdef NS32K
+ unsigned r_bsr:1;
+ unsigned r_disp:1;
+ unsigned r_pad:2;
+#else
+ unsigned int r_pad:4;
+#endif
+};
+#endif
+
+#endif
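The N_* accessors above just slice the single a_info word: magic in
bits 0-15, machine type in bits 16-23, flags in bits 24-31.  A hedged
standalone sketch (assumes struct exec from <asm/a.out.h>, pulled in by
this header):

#include <linux/a.out.h>
#include <stdio.h>

int main (void)
{
  struct exec e = { 0 };

  /* Pack magic, machine type and flags into a_info ...  */
  N_SET_INFO (e, ZMAGIC, M_386, 0);
  /* ... then read each field back out of its byte lane.  */
  printf ("magic=%#o machine=%d flags=%#x badmag=%d\n",
          N_MAGIC (e), (int) N_MACHTYPE (e), N_FLAGS (e), N_BADMAG (e));
  return 0;
}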
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include-fixed/stdio.h b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/stdio.h
new file mode 100644
index 0000000..357841c
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/stdio.h
@@ -0,0 +1,462 @@
+/* DO NOT EDIT THIS FILE.
+
+ It has been auto-edited by fixincludes from:
+
+ "/Volumes/android/toolchain-build-4.7/prefix/sysroot/usr/include/stdio.h"
+
+ This had to be done to correct non-standard usages in the
+ original, manufacturer supplied header file. */
+
+/* $OpenBSD: stdio.h,v 1.35 2006/01/13 18:10:09 miod Exp $ */
+/* $NetBSD: stdio.h,v 1.18 1996/04/25 18:29:21 jtc Exp $ */
+
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stdio.h 5.17 (Berkeley) 6/3/91
+ */
+
+#ifndef _STDIO_H_
+#define _STDIO_H_
+
+#include <sys/cdefs.h>
+#include <sys/_types.h>
+
+/* __gnuc_va_list and size_t must be defined by stdio.h according to Posix */
+#define __need___va_list
+#include <stdarg.h>
+
+/* note that this forces stddef.h to *only* define size_t */
+#define __need_size_t
+#include <stddef.h>
+
+#include <stddef.h>
+
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE
+#include <sys/types.h> /* XXX should be removed */
+#endif
+
+#ifndef _SIZE_T_DEFINED_
+#define _SIZE_T_DEFINED_
+typedef unsigned long size_t;
+#endif
+
+#ifndef _OFF_T_DEFINED_
+#define _OFF_T_DEFINED_
+typedef long off_t;
+#endif
+
+#ifndef NULL
+#ifdef __GNUG__
+#define NULL __null
+#else
+#define NULL 0L
+#endif
+#endif
+
+#define _FSTDIO /* Define for new stdio with functions. */
+
+typedef off_t fpos_t; /* stdio file position type */
+
+/*
+ * NB: to fit things in six character monocase externals, the stdio
+ * code uses the prefix `__s' for stdio objects, typically followed
+ * by a three-character attempt at a mnemonic.
+ */
+
+/* stdio buffers */
+struct __sbuf {
+ unsigned char *_base;
+ int _size;
+};
+
+/*
+ * stdio state variables.
+ *
+ * The following always hold:
+ *
+ * if (_flags&(__SLBF|__SWR)) == (__SLBF|__SWR),
+ * _lbfsize is -_bf._size, else _lbfsize is 0
+ * if _flags&__SRD, _w is 0
+ * if _flags&__SWR, _r is 0
+ *
+ * This ensures that the getc and putc macros (or inline functions) never
+ * try to write or read from a file that is in `read' or `write' mode.
+ * (Moreover, they can, and do, automatically switch from read mode to
+ * write mode, and back, on "r+" and "w+" files.)
+ *
+ * _lbfsize is used only to make the inline line-buffered output stream
+ * code as compact as possible.
+ *
+ * _ub, _up, and _ur are used when ungetc() pushes back more characters
+ * than fit in the current _bf, or when ungetc() pushes back a character
+ * that does not match the previous one in _bf. When this happens,
+ * _ub._base becomes non-nil (i.e., a stream has ungetc() data iff
+ * _ub._base!=NULL) and _up and _ur save the current values of _p and _r.
+ *
+ * NOTE: if you change this structure, you also need to update the
+ * std() initializer in findfp.c.
+ */
+typedef struct __sFILE {
+ unsigned char *_p; /* current position in (some) buffer */
+ int _r; /* read space left for getc() */
+ int _w; /* write space left for putc() */
+ short _flags; /* flags, below; this FILE is free if 0 */
+ short _file; /* fileno, if Unix descriptor, else -1 */
+ struct __sbuf _bf; /* the buffer (at least 1 byte, if !NULL) */
+ int _lbfsize; /* 0 or -_bf._size, for inline putc */
+
+ /* operations */
+ void *_cookie; /* cookie passed to io functions */
+ int (*_close)(void *);
+ int (*_read)(void *, char *, int);
+ fpos_t (*_seek)(void *, fpos_t, int);
+ int (*_write)(void *, const char *, int);
+
+ /* extension data, to avoid further ABI breakage */
+ struct __sbuf _ext;
+ /* data for long sequences of ungetc() */
+ unsigned char *_up; /* saved _p when _p is doing ungetc data */
+ int _ur; /* saved _r when _r is counting ungetc data */
+
+ /* tricks to meet minimum requirements even when malloc() fails */
+ unsigned char _ubuf[3]; /* guarantee an ungetc() buffer */
+ unsigned char _nbuf[1]; /* guarantee a getc() buffer */
+
+ /* separate buffer for fgetln() when line crosses buffer boundary */
+ struct __sbuf _lb; /* buffer for fgetln() */
+
+ /* Unix stdio files get aligned to block boundaries on fseek() */
+ int _blksize; /* stat.st_blksize (may be != _bf._size) */
+ fpos_t _offset; /* current lseek offset */
+} FILE;
+
+__BEGIN_DECLS
+extern FILE __sF[];
+__END_DECLS
+
+#define __SLBF 0x0001 /* line buffered */
+#define __SNBF 0x0002 /* unbuffered */
+#define __SRD 0x0004 /* OK to read */
+#define __SWR 0x0008 /* OK to write */
+ /* RD and WR are never simultaneously asserted */
+#define __SRW 0x0010 /* open for reading & writing */
+#define __SEOF 0x0020 /* found EOF */
+#define __SERR 0x0040 /* found error */
+#define __SMBF 0x0080 /* _buf is from malloc */
+#define __SAPP 0x0100 /* fdopen()ed in append mode */
+#define __SSTR 0x0200 /* this is an sprintf/snprintf string */
+#define __SOPT 0x0400 /* do fseek() optimisation */
+#define __SNPT 0x0800 /* do not do fseek() optimisation */
+#define __SOFF 0x1000 /* set iff _offset is in fact correct */
+#define __SMOD 0x2000 /* true => fgetln modified _p text */
+#define __SALC 0x4000 /* allocate string space dynamically */
+
+/*
+ * The following three definitions are for ANSI C, which took them
+ * from System V, which brilliantly took internal interface macros and
+ * made them official arguments to setvbuf(), without renaming them.
+ * Hence, these ugly _IOxxx names are *supposed* to appear in user code.
+ *
+ * Although numbered as their counterparts above, the implementation
+ * does not rely on this.
+ */
+#define _IOFBF 0 /* setvbuf should set fully buffered */
+#define _IOLBF 1 /* setvbuf should set line buffered */
+#define _IONBF 2 /* setvbuf should set unbuffered */
+
+#define BUFSIZ 1024 /* size of buffer used by setbuf */
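Since these _IOxxx names are meant to appear in user code, a small
editorial sketch (standalone, not part of the header) of setvbuf(3)
selecting the buffering modes:

#include <stdio.h>

int main (void)
{
  static char buf[BUFSIZ];

  /* Fully buffered through a caller-supplied buffer; setvbuf must be
     called before any other operation on the stream.  */
  setvbuf (stdout, buf, _IOFBF, sizeof buf);
  /* Unbuffered: the buffer argument may be NULL.  */
  setvbuf (stderr, NULL, _IONBF, 0);
  puts ("held in buf until a flush, fclose or exit");
  return 0;
}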
+
+#define EOF (-1)
+
+/*
+ * FOPEN_MAX is a minimum maximum, and should be the number of descriptors
+ * that the kernel can provide without allocation of a resource that can
+ * fail without the process sleeping. Do not use this for anything.
+ */
+#define FOPEN_MAX 20 /* must be <= OPEN_MAX <sys/syslimits.h> */
+#define FILENAME_MAX 1024 /* must be <= PATH_MAX <sys/syslimits.h> */
+
+/* System V/ANSI C; this is the wrong way to do this, do *not* use these. */
+#if __BSD_VISIBLE || __XPG_VISIBLE
+#define P_tmpdir "/tmp/"
+#endif
+#define L_tmpnam 1024 /* XXX must be == PATH_MAX */
+#define TMP_MAX 308915776
+
+#ifndef SEEK_SET
+#define SEEK_SET 0 /* set file offset to offset */
+#endif
+#ifndef SEEK_CUR
+#define SEEK_CUR 1 /* set file offset to current plus offset */
+#endif
+#ifndef SEEK_END
+#define SEEK_END 2 /* set file offset to EOF plus offset */
+#endif
+
+#define stdin (&__sF[0])
+#define stdout (&__sF[1])
+#define stderr (&__sF[2])
+
+/*
+ * Functions defined in ANSI C standard.
+ */
+__BEGIN_DECLS
+void clearerr(FILE *);
+int fclose(FILE *);
+int feof(FILE *);
+int ferror(FILE *);
+int fflush(FILE *);
+int fgetc(FILE *);
+int fgetpos(FILE *, fpos_t *);
+char *fgets(char *, int, FILE *);
+FILE *fopen(const char *, const char *);
+int fprintf(FILE *, const char *, ...);
+int fputc(int, FILE *);
+int fputs(const char *, FILE *);
+size_t fread(void *, size_t, size_t, FILE *);
+FILE *freopen(const char *, const char *, FILE *);
+int fscanf(FILE *, const char *, ...);
+int fseek(FILE *, long, int);
+int fseeko(FILE *, off_t, int);
+int fsetpos(FILE *, const fpos_t *);
+long ftell(FILE *);
+off_t ftello(FILE *);
+size_t fwrite(const void *, size_t, size_t, FILE *);
+int getc(FILE *);
+int getchar(void);
+char *gets(char *);
+#if __BSD_VISIBLE && !defined(__SYS_ERRLIST)
+#define __SYS_ERRLIST
+
+extern int sys_nerr; /* perror(3) external variables */
+extern char *sys_errlist[];
+#endif
+void perror(const char *);
+int printf(const char *, ...);
+int putc(int, FILE *);
+int putchar(int);
+int puts(const char *);
+int remove(const char *);
+int rename(const char *, const char *);
+void rewind(FILE *);
+int scanf(const char *, ...);
+void setbuf(FILE *, char *);
+int setvbuf(FILE *, char *, int, size_t);
+int sprintf(char *, const char *, ...);
+int sscanf(const char *, const char *, ...);
+FILE *tmpfile(void);
+char *tmpnam(char *);
+int ungetc(int, FILE *);
+int vfprintf(FILE *, const char *, __gnuc_va_list);
+int vprintf(const char *, __gnuc_va_list);
+int vsprintf(char *, const char *, __gnuc_va_list);
+
+#if __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE
+int snprintf(char *, size_t, const char *, ...)
+ __attribute__((__format__ (printf, 3, 4)))
+ __attribute__((__nonnull__ (3)));
+int vfscanf(FILE *, const char *, __gnuc_va_list)
+ __attribute__((__format__ (scanf, 2, 0)))
+ __attribute__((__nonnull__ (2)));
+int vscanf(const char *, __gnuc_va_list)
+ __attribute__((__format__ (scanf, 1, 0)))
+ __attribute__((__nonnull__ (1)));
+int vsnprintf(char *, size_t, const char *, __gnuc_va_list)
+ __attribute__((__format__ (printf, 3, 0)))
+ __attribute__((__nonnull__ (3)));
+int vsscanf(const char *, const char *, __gnuc_va_list)
+ __attribute__((__format__ (scanf, 2, 0)))
+ __attribute__((__nonnull__ (2)));
+#endif /* __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE */
+
+__END_DECLS
+
+
+/*
+ * Functions defined in POSIX 1003.1.
+ */
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE
+#define L_ctermid 1024 /* size for ctermid(); PATH_MAX */
+#define L_cuserid 9 /* size for cuserid(); UT_NAMESIZE + 1 */
+
+__BEGIN_DECLS
+#if 0 /* MISSING FROM BIONIC */
+char *ctermid(char *);
+char *cuserid(char *);
+#endif /* MISSING */
+FILE *fdopen(int, const char *);
+int fileno(FILE *);
+
+#if (__POSIX_VISIBLE >= 199209)
+int pclose(FILE *);
+FILE *popen(const char *, const char *);
+#endif
+
+#if __POSIX_VISIBLE >= 199506
+void flockfile(FILE *);
+int ftrylockfile(FILE *);
+void funlockfile(FILE *);
+
+/*
+ * These are normally used through macros as defined below, but POSIX
+ * requires functions as well.
+ */
+int getc_unlocked(FILE *);
+int getchar_unlocked(void);
+int putc_unlocked(int, FILE *);
+int putchar_unlocked(int);
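+
+/*
+ * Usage sketch (illustrative only): lock the stream once, then use the
+ * _unlocked variants inside the critical section to avoid per-character
+ * locking overhead.
+ */
+#if 0
+static long count_bytes(FILE *fp)
+{
+  long n = 0;
+  flockfile(fp);
+  while (getc_unlocked(fp) != EOF)
+    n++;
+  funlockfile(fp);
+  return n;
+}
+#endif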
+#endif /* __POSIX_VISIBLE >= 199506 */
+
+#if __XPG_VISIBLE
+char *tempnam(const char *, const char *);
+#endif
+__END_DECLS
+
+#endif /* __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE */
+
+/*
+ * Routines that are purely local.
+ */
+#if __BSD_VISIBLE
+__BEGIN_DECLS
+int asprintf(char **, const char *, ...)
+ __attribute__((__format__ (printf, 2, 3)))
+ __attribute__((__nonnull__ (2)));
+char *fgetln(FILE *, size_t *);
+int fpurge(FILE *);
+int getw(FILE *);
+int putw(int, FILE *);
+void setbuffer(FILE *, char *, int);
+int setlinebuf(FILE *);
+int vasprintf(char **, const char *, __gnuc_va_list)
+ __attribute__((__format__ (printf, 2, 0)))
+ __attribute__((__nonnull__ (2)));
+__END_DECLS
+
+/*
+ * Stdio function-access interface.
+ */
+__BEGIN_DECLS
+FILE *funopen(const void *,
+ int (*)(void *, char *, int),
+ int (*)(void *, const char *, int),
+ fpos_t (*)(void *, fpos_t, int),
+ int (*)(void *));
+__END_DECLS
+#define fropen(cookie, fn) funopen(cookie, fn, 0, 0, 0)
+#define fwopen(cookie, fn) funopen(cookie, 0, fn, 0, 0)
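+
+/*
+ * Usage sketch (illustrative only): fwopen() builds a write-only stream
+ * whose output is handed to the cookie's write function.
+ */
+#if 0
+static int sink_write(void *cookie, const char *buf, int n)
+{
+  return (int) fwrite(buf, 1, (size_t) n, (FILE *) cookie);
+}
+/* FILE *sink = fwopen(stderr, sink_write); */
+#endif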
+#endif /* __BSD_VISIBLE */
+
+/*
+ * Functions internal to the implementation.
+ */
+__BEGIN_DECLS
+int __srget(FILE *);
+int __swbuf(int, FILE *);
+__END_DECLS
+
+/*
+ * The __sfoo macros are here so that we can
+ * define function versions in the C library.
+ */
+#define __sgetc(p) (--(p)->_r < 0 ? __srget(p) : (int)(*(p)->_p++))
+#if defined(__GNUC__)
+static __inline int __sputc(int _c, FILE *_p) {
+ if (--_p->_w >= 0 || (_p->_w >= _p->_lbfsize && (char)_c != '\n'))
+ return (*_p->_p++ = _c);
+ else
+ return (__swbuf(_c, _p));
+}
+#else
+/*
+ * This has been tuned to generate reasonable code on the vax using pcc.
+ */
+#define __sputc(c, p) \
+ (--(p)->_w < 0 ? \
+ (p)->_w >= (p)->_lbfsize ? \
+ (*(p)->_p = (c)), *(p)->_p != '\n' ? \
+ (int)*(p)->_p++ : \
+ __swbuf('\n', p) : \
+ __swbuf((int)(c), p) : \
+ (*(p)->_p = (c), (int)*(p)->_p++))
+#endif
+
+#define __sfeof(p) (((p)->_flags & __SEOF) != 0)
+#define __sferror(p) (((p)->_flags & __SERR) != 0)
+#define __sclearerr(p) ((void)((p)->_flags &= ~(__SERR|__SEOF)))
+#define __sfileno(p) ((p)->_file)
+
+#define feof(p) __sfeof(p)
+#define ferror(p) __sferror(p)
+
+#ifndef _POSIX_THREADS
+#define clearerr(p) __sclearerr(p)
+#endif
+
+#if __POSIX_VISIBLE
+#define fileno(p) __sfileno(p)
+#endif
+
+#ifndef lint
+#ifndef _POSIX_THREADS
+#define getc(fp) __sgetc(fp)
+#endif /* _POSIX_THREADS */
+#define getc_unlocked(fp) __sgetc(fp)
+/*
+ * The macro implementations of putc and putc_unlocked are not
+ * fully POSIX-compliant; they do not set errno on failure.
+ */
+#if __BSD_VISIBLE
+#ifndef _POSIX_THREADS
+#define putc(x, fp) __sputc(x, fp)
+#endif /* _POSIX_THREADS */
+#define putc_unlocked(x, fp) __sputc(x, fp)
+#endif /* __BSD_VISIBLE */
+#endif /* lint */
+
+#define getchar() getc(stdin)
+#define putchar(x) putc(x, stdout)
+#define getchar_unlocked() getc_unlocked(stdin)
+#define putchar_unlocked(c) putc_unlocked(c, stdout)
+
+#ifdef _GNU_SOURCE
+/*
+ * glibc defines dprintf(int, const char*, ...), which is poorly named
+ * and likely to conflict with locally defined debugging printfs.
+ * fdprintf is a better name, and some programs that use fdprintf use a
+ * #define fdprintf dprintf for compatibility.
+ */
+int fdprintf(int, const char*, ...);
+int vfdprintf(int, const char*, __gnuc_va_list);
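+
+/*
+ * Usage sketch (illustrative only): fdprintf() formats straight to a raw
+ * file descriptor, bypassing FILE buffering.
+ */
+#if 0
+static void warn_fd(const char *msg)
+{
+  fdprintf(2, "warning: %s\n", msg);  /* 2 == STDERR_FILENO */
+}
+#endif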
+#endif /* _GNU_SOURCE */
+
+#endif /* _STDIO_H_ */
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include-fixed/sys/types.h b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/sys/types.h
new file mode 100644
index 0000000..5432fe5
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/sys/types.h
@@ -0,0 +1,140 @@
+/* DO NOT EDIT THIS FILE.
+
+ It has been auto-edited by fixincludes from:
+
+ "/Volumes/android/toolchain-build-4.7/prefix/sysroot/usr/include/sys/types.h"
+
+ This had to be done to correct non-standard usages in the
+ original, manufacturer supplied header file. */
+
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _SYS_TYPES_H_
+#define _SYS_TYPES_H_
+
+#define __need_size_t
+#define __need_ptrdiff_t
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/cdefs.h>
+
+#include <linux/posix_types.h>
+#include <asm/types.h>
+#include <linux/types.h>
+#include <machine/kernel.h>
+
+typedef __u32 __kernel_dev_t;
+
+/* be careful with __kernel_gid_t and __kernel_uid_t:
+ * these are defined as 16-bit for legacy reasons, but
+ * the kernel uses 32 bits instead.
+ *
+ * 32-bit values are required for Android, so use
+ * __kernel_uid32_t and __kernel_gid32_t
+ */
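+
+/*
+ * Compile-time check sketch (illustrative only): the 32-bit variants match
+ * what the kernel actually passes across the syscall boundary.
+ */
+#if 0
+typedef char __uid32_must_be_32bit[sizeof(__kernel_uid32_t) == 4 ? 1 : -1];
+#endif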
+
+typedef __kernel_blkcnt_t blkcnt_t;
+typedef __kernel_blksize_t blksize_t;
+typedef __kernel_clock_t clock_t;
+typedef __kernel_clockid_t clockid_t;
+typedef __kernel_dev_t dev_t;
+typedef __kernel_fsblkcnt_t fsblkcnt_t;
+typedef __kernel_fsfilcnt_t fsfilcnt_t;
+typedef __kernel_gid32_t gid_t;
+typedef __kernel_id_t id_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_key_t key_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+#ifndef _OFF_T_DEFINED_
+#define _OFF_T_DEFINED_
+typedef __kernel_off_t off_t;
+#endif
+typedef __kernel_loff_t loff_t;
+typedef loff_t off64_t; /* glibc-specific */
+
+typedef __kernel_pid_t pid_t;
+
+/* while POSIX wants these in <sys/types.h>, we
+ * declare them in <pthread.h> instead */
+#if 0
+typedef .... pthread_attr_t;
+typedef .... pthread_cond_t;
+typedef .... pthread_condattr_t;
+typedef .... pthread_key_t;
+typedef .... pthread_mutex_t;
+typedef .... pthread_once_t;
+typedef .... pthread_rwlock_t;
+typedef .... pthread_rwlock_attr_t;
+typedef .... pthread_t;
+#endif
+
+#ifndef _SIZE_T_DEFINED_
+#define _SIZE_T_DEFINED_
+#if !defined(_GCC_SIZE_T)
+#define _GCC_SIZE_T
+typedef __SIZE_TYPE__ size_t;
+#endif
+
+#endif
+
+/* size_t is defined by the GCC-specific <stddef.h> */
+#ifndef _SSIZE_T_DEFINED_
+#define _SSIZE_T_DEFINED_
+typedef long int ssize_t;
+#endif
+
+typedef __kernel_suseconds_t suseconds_t;
+typedef __kernel_time_t time_t;
+typedef __kernel_uid32_t uid_t;
+typedef signed long useconds_t;
+
+typedef __kernel_daddr_t daddr_t;
+typedef __kernel_timer_t timer_t;
+typedef __kernel_mqd_t mqd_t;
+
+typedef __kernel_caddr_t caddr_t;
+typedef unsigned int uint_t;
+typedef unsigned int uint;
+
+/* for some applications */
+#include <sys/sysmacros.h>
+
+#ifdef __BSD_VISIBLE
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+
+typedef uint32_t u_int32_t;
+typedef uint16_t u_int16_t;
+typedef uint8_t u_int8_t;
+typedef uint64_t u_int64_t;
+#endif
+
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include-fixed/syslimits.h b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/syslimits.h
new file mode 100644
index 0000000..a362802
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include-fixed/syslimits.h
@@ -0,0 +1,8 @@
+/* syslimits.h stands for the system's own limits.h file.
+ If we can use it unmodified, then we install this text.
+ If fixincludes fixes it, then the fixed version is installed
+ instead of this text. */
+
+#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */
+#include_next <limits.h>
+#undef _GCC_NEXT_LIMITS_H
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/arm_neon.h b/lib/gcc/arm-linux-androideabi/4.7/include/arm_neon.h
new file mode 100644
index 0000000..0567895
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/arm_neon.h
@@ -0,0 +1,12176 @@
+/* ARM NEON intrinsics include file. This file is generated automatically
+ using neon-gen.ml. Please do not edit manually.
+
+ Copyright (C) 2006, 2007, 2009 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_NEON_H
+#define _GCC_ARM_NEON_H 1
+
+#ifndef __ARM_NEON__
+#error You must enable NEON instructions (e.g. -mfloat-abi=softfp -mfpu=neon) to use arm_neon.h
+#else
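+
+/*
+ * (Illustrative note: a command line such as
+ *    arm-linux-androideabi-gcc -mfloat-abi=softfp -mfpu=neon -c foo.c
+ * defines __ARM_NEON__ and so makes these intrinsics available.)
+ */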
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_hi int16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_si int32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_di int64x1_t;
+typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_poly8 poly8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_poly16 poly16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_uqi uint8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_uhi uint16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_usi uint32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_udi uint64x1_t;
+typedef __builtin_neon_qi int8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_hi int16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_si int32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_di int64x2_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_sf float32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_poly8 poly8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_poly16 poly16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_uqi uint8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_uhi uint16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_usi uint32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_udi uint64x2_t __attribute__ ((__vector_size__ (16)));
+
+typedef float float32_t;
+typedef __builtin_neon_poly8 poly8_t;
+typedef __builtin_neon_poly16 poly16_t;
+
+typedef struct int8x8x2_t
+{
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t
+{
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t
+{
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t
+{
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t
+{
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t
+{
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t
+{
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t
+{
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t
+{
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t
+{
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t
+{
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t
+{
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t
+{
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t
+{
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t
+{
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t
+{
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float32x2x2_t
+{
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t
+{
+ float32x4_t val[2];
+} float32x4x2_t;
+
+typedef struct poly8x8x2_t
+{
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t
+{
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t
+{
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t
+{
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+typedef struct int8x8x3_t
+{
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t
+{
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t
+{
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t
+{
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t
+{
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t
+{
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t
+{
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t
+{
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t
+{
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t
+{
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t
+{
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t
+{
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t
+{
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t
+{
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t
+{
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t
+{
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float32x2x3_t
+{
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t
+{
+ float32x4_t val[3];
+} float32x4x3_t;
+
+typedef struct poly8x8x3_t
+{
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t
+{
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t
+{
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t
+{
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+typedef struct int8x8x4_t
+{
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t
+{
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t
+{
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t
+{
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t
+{
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t
+{
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t
+{
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t
+{
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t
+{
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t
+{
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t
+{
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t
+{
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t
+{
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t
+{
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t
+{
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t
+{
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float32x2x4_t
+{
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t
+{
+ float32x4_t val[4];
+} float32x4x4_t;
+
+typedef struct poly8x8x4_t
+{
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t
+{
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t
+{
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t
+{
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
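+/*
+ * Note (illustrative): the 8-byte vector types above map to NEON D
+ * registers, the 16-byte ones to Q registers, and the foo_txN structs
+ * carry the register groups used by the de-interleaving structure
+ * loads/stores defined later in this header.
+ */
+#if 0
+static int8x8_t first_of_pair(int8x8x2_t p)
+{
+  return p.val[0];  /* select the first D register of the pair */
+}
+#endif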
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vaddv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vadddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vadddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddv2di (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vaddv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
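+/*
+ * Usage sketch (illustrative only): lane-wise addition of four Q-register
+ * vectors using the vaddq_* intrinsics above.
+ */
+#if 0
+static int32x4_t sum4(int32x4_t a, int32x4_t b, int32x4_t c, int32x4_t d)
+{
+  return vaddq_s32(vaddq_s32(a, b), vaddq_s32(c, d));
+}
+#endif
+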
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddlv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddlv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddlv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddwv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddwv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddwv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddwv8qi ((int16x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddwv4hi ((int32x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddwv2si ((int64x2_t) __a, (int32x2_t) __b, 0);
+}
+
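+/*
+ * Usage sketch (illustrative only): widen 8-bit lanes to 16 bits before
+ * summing so the additions cannot wrap.
+ */
+#if 0
+static uint16x8_t sum3_u8(uint8x8_t a, uint8x8_t b, uint8x8_t c)
+{
+  return vaddw_u8(vaddl_u8(a, b), c);  /* (a + b) + c in 16-bit lanes */
+}
+#endif
+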
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhaddv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhaddv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhaddv2si (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhaddv2si ((int32x2_t) __a, (int32x2_t) __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhaddv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhaddv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhaddv4si (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhaddv4si ((int32x4_t) __a, (int32x4_t) __b, 4);
+}
+
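+/*
+ * Usage sketch (illustrative only): vrhaddq_u8 computes (a + b + 1) >> 1
+ * per lane without intermediate overflow, a one-instruction rounding
+ * average useful for blending two rows of 8-bit pixels.
+ */
+#if 0
+static uint8x16_t blend_rows(uint8x16_t row0, uint8x16_t row1)
+{
+  return vrhaddq_u8(row0, row1);
+}
+#endif
+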
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqadddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqadddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqaddv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqaddv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
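+/*
+ * Usage sketch (illustrative only): mixing two signed 16-bit PCM streams
+ * with vqaddq_s16, which clamps to [-32768, 32767] instead of wrapping.
+ */
+#if 0
+static int16x8_t mix_pcm(int16x8_t a, int16x8_t b)
+{
+  return vqaddq_s16(a, b);
+}
+#endif
+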
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vraddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vraddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vraddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b, 4);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmul_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vmulv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmulv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmulv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmulv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmul_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vmulv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmulv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmulv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmul_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vmulv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vmulv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmulv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmulv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmulv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vmulv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmulv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmulv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vmulv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b, 5);
+}
+
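+/*
+ * Usage sketch (illustrative only): vqrdmulh_s16 returns the saturated,
+ * rounded high half of the doubled product, i.e. a Q15 fixed-point
+ * multiply: (2*a*b + 0x8000) >> 16 per lane.
+ */
+#if 0
+static int16x4_t q15_mul(int16x4_t a, int16x4_t b)
+{
+  return vqrdmulh_s16(a, b);
+}
+#endif
+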
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmullv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmullv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmullv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmullv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmullv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmullv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vmullv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmullv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmullv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlav8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlav4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlav2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlav2sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlav16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlav8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlav4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlav4sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlalv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlalv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlalv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlalv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlalv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlalv2si (__a, __b, __c, 1);
+}
+
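+/*
+ * Usage sketch (illustrative only): the widening multiply-accumulate at
+ * the core of a fixed-point dot product; each 32-bit lane accumulates a
+ * full 16x16 product without overflow.
+ */
+#if 0
+static int32x4_t dot_step(int32x4_t acc, int16x4_t a, int16x4_t b)
+{
+  return vmlal_s16(acc, a, b);  /* acc[i] += (int32_t) a[i] * b[i] */
+}
+#endif
+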
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlsv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlsv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlsv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlsv2sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlsv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlsv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlsv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlsv16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlsv8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsv4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlsv4sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlsv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlsv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlslv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlslv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlslv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlslv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlslv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlslv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlslv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlslv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsub_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vsubv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vsubdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vsubdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsubv2di (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vsubv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsublv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsublv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsublv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsublv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsublv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsublv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsubwv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsubwv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsubwv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubwv8qi ((int16x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubwv4hi ((int32x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubwv2si ((int64x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
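+/* Saturating subtract (VQSUB): differences that overflow are clamped
+   to the limits of the element type instead of wrapping.  */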
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqsubdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqsubdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqsubv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqsubv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
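+/* Subtract and narrow (VSUBHN): returns the most significant half of
+   each double-width difference, truncated.  */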
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
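+/* Rounding variant of VSUBHN (VRSUBHN): the difference is rounded
+   before its high half is taken; the builtin is shared with vsubhn and
+   only the trailing rounding code (5/4) differs.  */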
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 4);
+}
+
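+/* Comparisons.  Each lane of the result is all ones when the predicate
+   holds and all zeros otherwise, hence the unsigned return types.
+   VCEQ tests lane-wise equality.  */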
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vceqv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vceqv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vceqv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vceqv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+}
+
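+/* VCGE: lane-wise __a >= __b.  The unsigned forms go through separate
+   vcgeu builtins, since signedness changes the comparison itself.  */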
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
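+/* There is no separate less-or-equal builtin: vcle is VCGE with the
+   operands swapped (__a <= __b iff __b >= __a).  */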
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+}
+
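+/* VCGT: lane-wise __a > __b, again with distinct unsigned builtins.  */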
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
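+/* vclt is VCGT with the operands swapped, as with vcle above.  */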
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+}
+
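+/* Absolute comparisons, float only: VACGE/VACGT compare |__a| against
+   |__b|; the le/lt forms again just swap the operands.  */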
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcage_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcageq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcale_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcagt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcagtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcalt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__b, __a, 3);
+}
+
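+/* Test bits (VTST): a result lane is all ones when __a and __b have at
+   least one set bit in common, i.e. (__a & __b) != 0.  */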
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vtstv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vtstv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vtstv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vtstv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vtstv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vtstv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vtstv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vtstv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+}
+
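+/* Absolute difference (VABD): |__a - __b| per lane.  */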
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vabdv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vabdv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vabdv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vabdv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vabd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vabdv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vabd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vabdv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vabd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vabdv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabdq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vabdv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabdq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vabdv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vabdv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabdv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabdv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
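+/* Widening absolute difference (VABDL): the double-width result holds
+   |__a - __b| computed without intermediate overflow.  */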
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vabdlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabdlv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabdlv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vabdlv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
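+/* Absolute difference and accumulate (VABA): __a + |__b - __c|.  */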
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vabav8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vabav4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vabav2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vabav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vabav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vabav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vabav16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabav8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabav4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vabav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
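+/* Widening accumulate (VABAL): adds the widened |__b - __c| to the
+   double-width accumulator __a.  */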
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabalv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vabalv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabalv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabalv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vabalv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
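+/* Lane-wise maximum (VMAX).  */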
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vmaxv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmaxv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmaxv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmaxv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vmaxv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmaxv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmaxv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vmaxv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmaxv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmaxv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmaxv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vmaxv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmaxv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmaxv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
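+/* Lane-wise minimum (VMIN).  */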
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vminv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vminv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vminv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vminv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vminv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vminv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vminv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vminv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vminv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vminv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vminv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vminv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vminv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vminv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
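+/* Pairwise add (VPADD): adjacent lanes of the concatenation of __a and
+   __b are summed, so the low half of the result comes from __a and the
+   high half from __b.  */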
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpaddv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
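+/* Pairwise add long (VPADDL): adjacent lanes of a single vector are
+   summed into double-width lanes, halving the lane count.  */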
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpaddl_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vpaddlv8qi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpaddl_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vpaddlv4hi (__a, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddl_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vpaddlv2si (__a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpaddl_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vpaddlv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpaddl_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vpaddlv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpaddl_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vpaddlv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddlq_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vpaddlv16qi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddlq_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vpaddlv8hi (__a, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddlq_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vpaddlv4si (__a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddlq_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vpaddlv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddlq_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vpaddlv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddlq_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vpaddlv4si ((int32x4_t) __a, 0);
+}
+
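+/* Pairwise add and accumulate long (VPADAL): the pairwise long sums of
+   __b are added to the double-width accumulator __a.  */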
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadal_s8 (int16x4_t __a, int8x8_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpadalv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadal_s16 (int32x2_t __a, int16x4_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpadalv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpadal_s32 (int64x1_t __a, int32x2_t __b)
+{
+ return (int64x1_t)__builtin_neon_vpadalv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadal_u8 (uint16x4_t __a, uint8x8_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpadalv8qi ((int16x4_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadal_u16 (uint32x2_t __a, uint16x4_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpadalv4hi ((int32x2_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpadal_u32 (uint64x1_t __a, uint32x2_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vpadalv2si ((int64x1_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpadalq_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vpadalv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpadalq_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t)__builtin_neon_vpadalv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpadalq_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t)__builtin_neon_vpadalv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpadalq_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vpadalv16qi ((int16x8_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpadalq_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vpadalv8hi ((int32x4_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpadalq_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vpadalv4si ((int64x2_t) __a, (int32x4_t) __b, 0);
+}
+
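+/* Pairwise maximum (VPMAX): the maximum of each adjacent pair, with
+   __a supplying the low half of the result and __b the high half.  */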
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpmaxv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpmaxv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpmaxv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpmaxv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpmaxv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpmaxv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpmaxv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
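+/* Pairwise minimum (VPMIN), with the same pairwise layout as vpmax.  */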
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpminv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpminv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpminv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpminv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpminv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpminv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpminv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
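+/* Newton-Raphson step helpers: VRECPS returns 2 - __a * __b and
+   VRSQRTS returns (3 - __a * __b) / 2, refining the estimates produced
+   by VRECPE and VRSQRTE respectively.  */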
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecps_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrecpsv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrecpsv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrts_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtsv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtsv4sf (__a, __b, 3);
+}
+
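+/* Variable shift (VSHL): each lane of __a is shifted by the signed
+   amount in the corresponding lane of __b; negative counts shift
+   right.  The shift vector is signed even for unsigned data.  */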
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vshlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vshlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vshlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vshldi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vshlv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vshlv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vshlv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vshldi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vshlv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vshlv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vshlv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vshlv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vshlv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vshlv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vshlv2di ((int64x2_t) __a, __b, 0);
+}
+
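+/* Rounding shift (VRSHL): as vshl, but right shifts round to nearest
+   instead of truncating; the builtin is shared with vshl and selected
+   by the trailing rounding code.  */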
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vshlv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vshlv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vshlv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vshldi (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vshlv8qi ((int8x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vshlv4hi ((int16x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vshlv2si ((int32x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vshldi ((int64x1_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vshlv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vshlv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vshlv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vshlv16qi ((int8x16_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vshlv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vshlv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vshlv2di ((int64x2_t) __a, __b, 4);
+}
+
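+/* Saturating shift (VQSHL): left shifts that would overflow saturate
+   to the limits of the element type.  */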
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqshlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqshldi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshldi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqshlv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqshlv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqshlv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlv2di ((int64x2_t) __a, __b, 0);
+}
+
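+/* vqrshl: as vqshl, but right shifts round to nearest rather than
+   truncating (VQRSHL).  */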
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqshlv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqshldi (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlv8qi ((int8x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlv4hi ((int16x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlv2si ((int32x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshldi ((int64x1_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqshlv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqshlv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqshlv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlv16qi ((int8x16_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlv2di ((int64x2_t) __a, __b, 4);
+}
+
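+/* vshr_n: shift right by the constant immediate __b (VSHR); __b must
+   lie in 1..element width in bits.  */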
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshr_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshr_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshr_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshr_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshr_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshr_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshr_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshr_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshr_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshr_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshr_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshr_nv2di ((int64x2_t) __a, __b, 0);
+}
+
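+/* vrshr_n: rounding shift right by immediate (VRSHR); equivalent to
+   adding 1 << (__b - 1) before the shift.  */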
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshr_nv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshr_nv8qi ((int8x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshr_nv4hi ((int16x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshr_nv2si ((int32x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshr_ndi ((int64x1_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshr_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshr_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshr_nv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshr_nv16qi ((int8x16_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshr_nv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshr_nv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshr_nv2di ((int64x2_t) __a, __b, 4);
+}
+
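+/* vshrn_n: narrowing shift right (VSHRN); each element is shifted
+   right by __b and the low half of the bits is kept, so the result
+   elements are half the source width.  */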
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b, 0);
+}
+
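+/* vrshrn_n: rounding variant of the narrowing shift right (VRSHRN).  */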
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b, 4);
+}
+
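+/* vqshrn_n: saturating narrowing shift right (VQSHRN); values that do
+   not fit the narrower type saturate instead of being truncated.  */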
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshrn_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshrn_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshrn_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrn_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrn_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrn_nv2di ((int64x2_t) __a, __b, 0);
+}
+
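+/* vqrshrn_n: saturating rounding narrowing shift right (VQRSHRN).  */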
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshrn_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshrn_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshrn_nv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrn_nv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrn_nv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrn_nv2di ((int64x2_t) __a, __b, 4);
+}
+
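+/* vqshrun_n and, below, its rounding form vqrshrun_n take signed
+   input and produce an unsigned, saturated narrow result
+   (VQSHRUN/VQRSHRUN); negative inputs saturate to zero.  */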
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b, 5);
+}
+
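+/* vshl_n: shift left by the constant immediate __b (VSHL); __b must
+   lie in 0..element width - 1.  */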
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshl_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshl_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshl_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshl_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshl_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshl_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshl_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshl_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshl_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshl_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshl_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshl_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshl_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshl_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshl_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshl_nv2di ((int64x2_t) __a, __b, 0);
+}
+
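+/* vqshl_n: saturating shift left by immediate (VQSHL); bits shifted
+   past the top cause the result to saturate rather than wrap.  */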
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshl_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshl_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshl_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vqshl_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshl_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshl_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshl_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshl_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vqshl_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vqshl_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vqshl_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vqshl_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshl_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshl_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshl_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshl_nv2di ((int64x2_t) __a, __b, 0);
+}
+
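+/* vqshlu_n: saturating shift left of a signed input to an unsigned
+   result (VQSHLU); negative inputs saturate to zero.  */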
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshlu_n_s8 (int8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlu_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshlu_n_s16 (int16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlu_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshlu_n_s32 (int32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlu_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshlu_n_s64 (int64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshlu_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshluq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlu_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshluq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlu_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshluq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlu_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshluq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlu_nv2di (__a, __b, 1);
+}
+
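+/* vshll_n: widening shift left (VSHLL); the result elements are twice
+   the width of the source elements.  */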
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshll_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshll_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshll_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshll_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshll_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshll_nv2si ((int32x2_t) __a, __b, 0);
+}
+
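+/* vsra_n: shift right and accumulate (VSRA); per lane this computes
+   __a + (__b >> __c).  */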
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsra_nv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsra_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsra_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsra_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsra_ndi ((int64x1_t) __a, (int64x1_t) __b, __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsra_nv8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsra_nv4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsra_nv2di (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsra_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsra_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsra_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 0);
+}
+
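+/* vrsra_n: rounding shift right and accumulate (VRSRA).  */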
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsra_nv4hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsra_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsra_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsra_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsra_ndi ((int64x1_t) __a, (int64x1_t) __b, __c, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsra_nv8hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsra_nv4si (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsra_nv2di (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsra_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsra_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsra_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 4);
+}
+
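+/* vsri_n: shift right and insert (VSRI); __b shifted right by __c is
+   written into __a, leaving the top __c bits of each element of __a
+   unchanged.  */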
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsri_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsri_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsri_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsri_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsri_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vsri_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsri_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsri_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsri_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsri_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsri_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsriq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
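+/* vsli_n: shift left and insert (VSLI); __b shifted left by __c is
+   written into __a, leaving the low __c bits of each element of __a
+   unchanged.  */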
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsli_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsli_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsli_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsli_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsli_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vsli_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsli_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsli_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsli_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsli_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsli_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsliq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsliq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
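+/* vabs: per-lane absolute value (VABS).  For integer types the most
+   negative value wraps to itself; see vqabs below for the saturating
+   form.  */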
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vabsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vabsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vabsv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabs_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vabsv2sf (__a, 3);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vabsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vabsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vabsv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabsq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vabsv4sf (__a, 3);
+}
+
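+/* vqabs: saturating absolute value (VQABS); the most negative value
+   saturates to the most positive.  */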
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqabsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqabsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqabsv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqabsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqabsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqabsv4si (__a, 1);
+}
+
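+/* vneg: per-lane negation (VNEG).  */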
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vnegv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vnegv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vnegv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vneg_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vnegv2sf (__a, 3);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vnegv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vnegv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vnegv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vnegq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vnegv4sf (__a, 3);
+}
+
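+/* vqneg: saturating negation (VQNEG); the most negative value
+   saturates to the most positive.  */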
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqnegv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqnegv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqnegv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqnegv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqnegv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqnegv4si (__a, 1);
+}
+
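+/* vmvn: per-lane bitwise NOT (VMVN).  */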
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmvn_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vmvnv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmvn_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vmvnv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmvn_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vmvnv2si (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmvn_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmvn_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vmvnv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmvn_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vmvnv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmvn_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmvnq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vmvnv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmvnq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vmvnv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmvnq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vmvnv4si (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmvnq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmvnq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vmvnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmvnq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vmvnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmvnq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 2);
+}
+
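+/* vcls: count leading sign bits (VCLS), i.e. the number of bits
+   following the sign bit that are equal to it.  */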
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcls_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcls_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcls_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclsv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclsv4si (__a, 1);
+}
+
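+/* vclz: count leading zero bits (VCLZ).  */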
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vclz_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclzv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vclz_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclzv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vclz_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclzv2si (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclz_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vclzv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclz_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vclzv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclz_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vclzv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclzq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclzv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclzq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclzv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclzq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclzv4si (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclzq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vclzv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vclzq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vclzv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclzq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vclzv4si ((int32x4_t) __a, 0);
+}
+
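+/* vcnt: population count of each byte (VCNT); only 8-bit element
+   types are provided.  */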
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcnt_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vcntv8qi (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcnt_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcnt_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcntq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vcntv16qi (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcntq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcntq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 2);
+}
+
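+/* vrecpe: reciprocal estimate (VRECPE), typically refined with
+   Newton-Raphson steps via the vrecps_* intrinsics; the u32 form
+   operates on unsigned fixed-point inputs.  */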
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecpe_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrecpev2sf (__a, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrecpe_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrecpev2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpeq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrecpev4sf (__a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrecpeq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrecpev4si ((int32x4_t) __a, 0);
+}
+
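+/* vrsqrte: reciprocal square root estimate (VRSQRTE), refined with
+   the vrsqrts_* intrinsics.  */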
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrte_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtev2sf (__a, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsqrte_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrsqrtev2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtev4sf (__a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrsqrtev4si ((int32x4_t) __a, 0);
+}
+
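+/* vget_lane: extract a single scalar lane; the lane index __b must be
+   a constant within range for the vector type.  */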
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_lanev2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanedi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_lanev4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 2);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanev2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b, 0);
+}
+
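+/* vset_lane/vsetq_lane: return a copy of __b with lane __c replaced by the
+   scalar __a.  */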
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vset_lane_s8 (int8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vset_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vset_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vset_lane_f32 (float32_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vset_lanev2sf ((__builtin_neon_sf) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vset_lane_u8 (uint8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vset_lane_u16 (uint16_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vset_lane_u32 (uint32_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vset_lane_p8 (poly8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vset_lane_p16 (poly16_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vset_lane_s64 (int64_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vset_lane_u64 (uint64_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vset_lanev4sf ((__builtin_neon_sf) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_p8 (poly8_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_p16 (poly16_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c);
+}
+
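+/* vcreate: reinterpret the 64-bit value __a as a 64-bit NEON vector.  */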
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcreate_s8 (uint64_t __a)
+{
+ return (int8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcreate_s16 (uint64_t __a)
+{
+ return (int16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcreate_s32 (uint64_t __a)
+{
+ return (int32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vcreate_s64 (uint64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcreate_f32 (uint64_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcreatev2sf ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcreate_u8 (uint64_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcreate_u16 (uint64_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcreate_u32 (uint64_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcreate_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcreate_p8 (uint64_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vcreate_p16 (uint64_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
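+/* vdup_n/vdupq_n: broadcast the scalar __a into every lane of the result.  */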
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_n_s8 (int8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_n_s16 (int16_t __a)
+{
+ return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_n_s32 (int32_t __a)
+{
+ return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_n_f32 (float32_t __a)
+{
+ return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_n_s64 (int64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_n_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_n_s8 (int8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_n_s16 (int16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_n_s32 (int32_t __a)
+{
+ return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_n_f32 (float32_t __a)
+{
+ return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_n_u8 (uint8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_n_u16 (uint16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_n_p8 (poly8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_n_p16 (poly16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_n_s64 (int64_t __a)
+{
+ return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
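+/* vmov_n/vmovq_n: synonyms for vdup_n/vdupq_n, built on the same
+   __builtin_neon_vdup_n* calls.  */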
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmov_n_s8 (int8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmov_n_s16 (int16_t __a)
+{
+ return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmov_n_s32 (int32_t __a)
+{
+ return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmov_n_f32 (float32_t __a)
+{
+ return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmov_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmov_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmov_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmov_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vmov_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vmov_n_s64 (int64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vmov_n_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovq_n_s8 (int8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovq_n_s16 (int16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovq_n_s32 (int32_t __a)
+{
+ return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmovq_n_f32 (float32_t __a)
+{
+ return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovq_n_u8 (uint8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovq_n_u16 (uint16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmovq_n_p8 (poly8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmovq_n_p16 (poly16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovq_n_s64 (int64_t __a)
+{
+ return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
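+/* vdup_lane/vdupq_lane: broadcast lane __b of the 64-bit vector __a.  */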
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vdup_lanev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vdup_lanev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vdup_lanev2si (__a, __b);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vdup_lanev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vdup_lanev2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vdup_lanedi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vdup_lanev16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vdup_lanev8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vdup_lanev4si (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vdup_lanev4sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vdup_lanev4si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vdup_lanev2di ((int64x1_t) __a, __b);
+}
+
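+/* vcombine: join two 64-bit vectors into a 128-bit vector, __a in the low
+   half and __b in the high half.  */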
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcombine_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x16_t)__builtin_neon_vcombinev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vcombine_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x8_t)__builtin_neon_vcombinev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcombine_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x4_t)__builtin_neon_vcombinev2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcombine_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcombine_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x4_t)__builtin_neon_vcombinev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcombinev2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vcombinedi ((int64x1_t) __a, (int64x1_t) __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
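+/* vget_high: extract the high 64-bit half of a 128-bit vector.  */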
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_high_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_highv16qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_high_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_highv8hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_high_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_highv4si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_high_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_highv2di (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_high_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_highv4sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_high_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_high_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_high_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_highv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_high_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_high_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_high_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
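+/* vget_low: extract the low 64-bit half of a 128-bit vector.  */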
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_low_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_lowv16qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_low_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_lowv8hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_low_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_lowv4si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_low_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_lowv4sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_low_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_low_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_low_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_lowv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_low_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_low_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_low_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_lowv2di (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_low_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
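+/* vcvt: convert between 32-bit floating-point and 32-bit integer lanes.  */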
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vcvtv2sf (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vcvtv2sf (__a, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vcvtv4sf (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vcvtv4sf (__a, 0);
+}
+
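+/* vcvt_n: fixed-point conversion; __b is the number of fraction bits.  */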
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_n_s32_f32 (float32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_n_f32_s32 (int32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvt_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvt_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_n_u32_f32 (float32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvt_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvt_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 0);
+}
+
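+/* vmovn: narrow each element to half width, keeping the low-order bits.  */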
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vmovnv8hi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vmovnv4si (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vmovnv2di (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vmovnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vmovnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vmovnv2di ((int64x2_t) __a, 0);
+}
+
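+/* vqmovn: narrow each element to half width, saturating on overflow.  */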
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqmovnv8hi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqmovnv4si (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqmovnv2di (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovnv2di ((int64x2_t) __a, 0);
+}
+
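+/* vqmovun: narrow signed elements to unsigned half width with saturation.  */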
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovun_s16 (int16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovunv8hi (__a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovun_s32 (int32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovunv4si (__a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovun_s64 (int64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovunv2di (__a, 1);
+}
+
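+/* vmovl: widen each element to double width (sign- or zero-extended).  */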
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_s8 (int8x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vmovlv8qi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_s16 (int16x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vmovlv4hi (__a, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_s32 (int32x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vmovlv2si (__a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_u8 (uint8x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vmovlv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_u16 (uint16x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vmovlv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_u32 (uint32x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vmovlv2si ((int32x2_t) __a, 0);
+}
+
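+/* vtbl1-vtbl4: table lookup; each lane of __b indexes a byte of the table
+   __a, and out-of-range indices yield 0.  The unions below appear to repack
+   the multi-vector structs into the wide modes the builtins expect.  */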
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl1_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vtbl1v8qi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl1_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl1_p8 (poly8x8_t __a, uint8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl2_s8 (int8x8x2_t __a, int8x8_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl2_u8 (uint8x8x2_t __a, uint8x8_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl2_p8 (poly8x8x2_t __a, uint8x8_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl3_s8 (int8x8x3_t __a, int8x8_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl3_u8 (uint8x8x3_t __a, uint8x8_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl3_p8 (poly8x8x3_t __a, uint8x8_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl4_s8 (int8x8x4_t __a, int8x8_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl4_u8 (uint8x8x4_t __a, uint8x8_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl4_p8 (poly8x8x4_t __a, uint8x8_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
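+/* vtbx1-vtbx4: table lookup extension; out-of-range indices leave the
+   corresponding lane of __a unchanged.  */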
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx1_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vtbx1v8qi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx1_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx1_p8 (poly8x8_t __a, poly8x8_t __b, uint8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx2_s8 (int8x8_t __a, int8x8x2_t __b, int8x8_t __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx2v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx2_u8 (uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx2_p8 (poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx3_s8 (int8x8_t __a, int8x8x3_t __b, int8x8_t __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx3v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx3_u8 (uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx3_p8 (poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx4_s8 (int8x8_t __a, int8x8x4_t __b, int8x8_t __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx4v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx4_u8 (uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx4_p8 (poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
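+/* vmul_lane/vmulq_lane: multiply each lane of __a by lane __c of __b.  */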
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vmul_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vmul_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vmul_lanev2sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vmul_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vmul_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vmul_lanev8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmul_lanev4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vmul_lanev4sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vmul_lanev8hi ((int16x8_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmul_lanev4si ((int32x4_t) __a, (int32x2_t) __b, __c, 0);
+}
+
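+/* vmla_lane/vmlaq_lane: multiply-accumulate, __a + __b * lane __d of __c.  */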
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmla_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmla_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmla_lanev2sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmla_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmla_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmla_lanev8hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmla_lanev4si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmla_lanev4sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmla_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmla_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+}
+
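+/* vmlal_lane: widening multiply-accumulate by lane __d of __c.  */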
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlal_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlal_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlal_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlal_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
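+/* vqdmlal_lane: saturating doubling widening multiply-accumulate by lane.  */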
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_lanev2si (__a, __b, __c, __d, 1);
+}
+
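+/* vmls_lane/vmlsq_lane: multiply-subtract, __a - __b * lane __d of __c.  */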
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmls_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmls_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmls_lanev2sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmls_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmls_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmls_lanev8hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmls_lanev4si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmls_lanev4sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmls_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmls_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+}
+
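+/* vmlsl_lane: widening multiply-subtract by lane __d of __c.  */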
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlsl_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlsl_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlsl_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlsl_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
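+/* vqdmlsl_lane: saturating doubling widening multiply-subtract by lane.  */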
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si (__a, __b, __c, __d, 1);
+}
+
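+/* vmull_lane: widening multiply by lane __c of __b.  */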
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmull_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vmull_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmull_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vmull_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
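+/* vqdmull_lane: saturating doubling widening multiply by lane.  */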
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_lanev2si (__a, __b, __c, 1);
+}
+
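+/* vqdmulh_lane/vqdmulhq_lane: saturating doubling multiply by lane,
+   returning the high half of the result.  */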
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 1);
+}
+
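+/* vqrdmulh_lane/vqrdmulhq_lane: as vqdmulh_lane but rounding; the trailing
+   constant 5 appears to encode the rounding form of the same builtin.  */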
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 5);
+}
+
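+/* vmul_n/vmulq_n: multiply each lane of __a by the scalar __b.  */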
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmul_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmul_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t __a, float32_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, (__builtin_neon_sf) __b, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmul_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmul_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmul_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmul_nv4si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, (__builtin_neon_sf) __b, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmul_nv8hi ((int16x8_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmul_nv4si ((int32x4_t) __a, (__builtin_neon_si) __b, 0);
+}
+
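+/* vmull_n: widening multiply of each lane of __a by the scalar __b.  */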
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmull_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmull_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+}
+
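+/* vqdmull_n: saturating doubling widening multiply by a scalar,
+   i.e. saturate (2 * __a[i] * __b) at double width (VQDMULL).  */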
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
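+/* vqdmulh_n / vqrdmulh_n: saturating doubling multiply by a scalar,
+   returning the high half of each product; the vqr* forms round
+   before truncating (VQDMULH / VQRDMULH).  */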
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 5);
+}
+
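+/* vmla_n, vmlal_n, vqdmlal_n: multiply-accumulate by a scalar,
+   computing __a + __b * __c; the *l forms widen, and vqdmlal
+   additionally doubles and saturates (VMLA / VMLAL / VQDMLAL).  */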
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmla_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmla_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmla_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmla_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmla_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmla_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmla_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmla_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlal_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlal_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
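+/* vmls_n, vmlsl_n, vqdmlsl_n: the multiply-subtract counterparts,
+   computing __a - __b * __c (VMLS / VMLSL / VQDMLSL).  */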
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmls_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmls_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmls_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmls_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmls_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmls_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmls_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmls_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsl_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlsl_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
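+/* vext: extract a vector from the concatenation of __a and __b,
+   starting at element index __c: r[i] takes __a[i + __c] until __a
+   is exhausted, then continues from the start of __b (VEXT).
+   E.g. vext_u8 (a, b, 3) yields {a3..a7, b0..b2}.  */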
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vext_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vextv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vext_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vextv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vext_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vextv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vext_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vext_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vextv2sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vext_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vext_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vext_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vextv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vext_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vextdi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vext_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vext_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vextq_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vextv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vextq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vextv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vextq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vextv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vextq_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vextv2di (__a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vextq_f32 (float32x4_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vextv4sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vextq_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vextq_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vextq_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vextv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vextq_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vextq_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vextq_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
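+/* vrev64 / vrev32 / vrev16: reverse the order of the elements within
+   each 64-, 32- or 16-bit group of the vector (VREV).  */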
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev64_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vrev64v8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev64_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vrev64v4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrev64_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vrev64v2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrev64_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrev64v2sf (__a, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev64_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vrev64v8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev64_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vrev64v4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrev64_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrev64v2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev64_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vrev64v8qi ((int8x8_t) __a, 2);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev64_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vrev64v4hi ((int16x4_t) __a, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev64q_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vrev64v16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev64q_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vrev64v8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrev64q_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vrev64v4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrev64q_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrev64v4sf (__a, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev64q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vrev64v16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev64q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vrev64v8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrev64q_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrev64v4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev64q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vrev64v16qi ((int8x16_t) __a, 2);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev64q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vrev64v8hi ((int16x8_t) __a, 2);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev32_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vrev32v8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev32_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vrev32v4hi (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev32_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vrev32v8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev32_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vrev32v4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev32_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vrev32v8qi ((int8x8_t) __a, 2);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev32_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vrev32v4hi ((int16x4_t) __a, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev32q_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vrev32v16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev32q_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vrev32v8hi (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev32q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vrev32v16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev32q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vrev32v8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev32q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vrev32v16qi ((int8x16_t) __a, 2);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev32q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vrev32v8hi ((int16x8_t) __a, 2);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev16_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vrev16v8qi (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev16_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vrev16v8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev16_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vrev16v8qi ((int8x8_t) __a, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev16q_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vrev16v16qi (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev16q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vrev16v16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev16q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vrev16v16qi ((int8x16_t) __a, 2);
+}
+
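+/* vbsl: bitwise select; each result bit takes __b where the mask __a
+   is set and __c where it is clear:
+   r = (__a & __b) | (~__a & __c) (VBSL).  */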
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
+{
+ return (int64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vbslv2sf ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
+{
+ return (uint64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, (int64x1_t) __b, (int64x1_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
+{
+ return (poly16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vbslv4sf ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
+{
+ return (poly8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
+{
+ return (poly16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
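+/* vtrn: transpose; treats the pair of vectors as 2x2 element
+   matrices and swaps the off-diagonal elements, e.g. for bytes
+   val[0] = {a0,b0,a2,b2,...} and val[1] = {a1,b1,a3,b3,...}
+   (VTRN).  */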
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vtrn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+ __builtin_neon_vtrnv8qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vtrn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+ __builtin_neon_vtrnv4hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vtrn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+ __builtin_neon_vtrnv2si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vtrn_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+ __builtin_neon_vtrnv2sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vtrn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+ __builtin_neon_vtrnv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vtrn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+ __builtin_neon_vtrnv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vtrn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+ __builtin_neon_vtrnv2si ((int32x2_t *) &__rv.val[0], (int32x2_t) __a, (int32x2_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vtrn_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+ __builtin_neon_vtrnv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vtrn_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+ __builtin_neon_vtrnv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+ __builtin_neon_vtrnv16qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+ __builtin_neon_vtrnv8hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+ __builtin_neon_vtrnv4si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+ __builtin_neon_vtrnv4sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+ __builtin_neon_vtrnv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+ __builtin_neon_vtrnv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+ __builtin_neon_vtrnv4si ((int32x4_t *) &__rv.val[0], (int32x4_t) __a, (int32x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+ __builtin_neon_vtrnv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+ __builtin_neon_vtrnv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
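+/* vzip: interleave the elements of two vectors; val[0] holds the
+   zipped low halves {a0,b0,a1,b1,...} and val[1] the zipped high
+   halves (VZIP).  */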
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vzip_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+ __builtin_neon_vzipv8qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vzip_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+ __builtin_neon_vzipv4hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vzip_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+ __builtin_neon_vzipv2si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vzip_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+ __builtin_neon_vzipv2sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vzip_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+ __builtin_neon_vzipv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vzip_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+ __builtin_neon_vzipv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vzip_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+ __builtin_neon_vzipv2si ((int32x2_t *) &__rv.val[0], (int32x2_t) __a, (int32x2_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vzip_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+ __builtin_neon_vzipv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vzip_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+ __builtin_neon_vzipv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vzipq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+ __builtin_neon_vzipv16qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vzipq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+ __builtin_neon_vzipv8hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vzipq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+ __builtin_neon_vzipv4si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vzipq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+ __builtin_neon_vzipv4sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vzipq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+ __builtin_neon_vzipv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vzipq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+ __builtin_neon_vzipv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vzipq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+ __builtin_neon_vzipv4si ((int32x4_t *) &__rv.val[0], (int32x4_t) __a, (int32x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vzipq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+ __builtin_neon_vzipv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vzipq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+ __builtin_neon_vzipv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
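+/* vuzp: de-interleave; val[0] gathers the even-indexed elements
+   {a0,a2,...,b0,b2,...} and val[1] the odd-indexed elements
+   (VUZP).  */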
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vuzp_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+ __builtin_neon_vuzpv8qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vuzp_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+ __builtin_neon_vuzpv4hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vuzp_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+ __builtin_neon_vuzpv2si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vuzp_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+ __builtin_neon_vuzpv2sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vuzp_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+ __builtin_neon_vuzpv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vuzp_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+ __builtin_neon_vuzpv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vuzp_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+ __builtin_neon_vuzpv2si ((int32x2_t *) &__rv.val[0], (int32x2_t) __a, (int32x2_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vuzp_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+ __builtin_neon_vuzpv8qi ((int8x8_t *) &__rv.val[0], (int8x8_t) __a, (int8x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vuzp_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+ __builtin_neon_vuzpv4hi ((int16x4_t *) &__rv.val[0], (int16x4_t) __a, (int16x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+ __builtin_neon_vuzpv16qi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+ __builtin_neon_vuzpv8hi (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+ __builtin_neon_vuzpv4si (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+ __builtin_neon_vuzpv4sf (&__rv.val[0], __a, __b);
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+ __builtin_neon_vuzpv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+ __builtin_neon_vuzpv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+ __builtin_neon_vuzpv4si ((int32x4_t *) &__rv.val[0], (int32x4_t) __a, (int32x4_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+ __builtin_neon_vuzpv16qi ((int8x16_t *) &__rv.val[0], (int8x16_t) __a, (int8x16_t) __b);
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+ __builtin_neon_vuzpv8hi ((int16x8_t *) &__rv.val[0], (int16x8_t) __a, (int16x8_t) __b);
+ return __rv;
+}
+
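+/* vld1: load a full vector of elements from memory (VLD1).  */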
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_s64 (const int64_t * __a)
+{
+ return (int64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1v2sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1v4sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
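+/* vld1_lane: load a single element from memory into lane __c,
+   keeping the remaining lanes of __b unchanged (VLD1 to one
+   lane).  */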
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_lane_s16 (const int16_t * __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_lane_s32 (const int32_t * __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_lane_f32 (const float32_t * __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vld1_lanev2sf ((const __builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_lane_u8 (const uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_lane_u16 (const uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_lane_u32 (const uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_lane_p8 (const poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_lane_p16 (const poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_lane_s64 (const int64_t * __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_lane_u64 (const uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_s8 (const int8_t * __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_s16 (const int16_t * __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_s32 (const int32_t * __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_f32 (const float32_t * __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vld1_lanev4sf ((const __builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_u8 (const uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_u16 (const uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_u32 (const uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_p8 (const poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_p16 (const poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_s64 (const int64_t * __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_u64 (const uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
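+/* vld1_dup: load one element from memory and replicate it across
+   all lanes of the result (VLD1 with broadcast).  */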
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_dup_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_dup_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_dup_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_dup_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1_dupv2sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_dup_s64 (const int64_t * __a)
+{
+ return (int64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1_dupv4sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
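+/* vst1: store a full vector of elements to memory (VST1).  */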
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s8 (int8_t * __a, int8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s16 (int16_t * __a, int16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s32 (int32_t * __a, int32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s64 (int64_t * __a, int64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f32 (float32_t * __a, float32x2_t __b)
+{
+ __builtin_neon_vst1v2sf ((__builtin_neon_sf *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u8 (uint8_t * __a, uint8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u16 (uint16_t * __a, uint16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u32 (uint32_t * __a, uint32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, (int32x2_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u64 (uint64_t * __a, uint64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, (int64x1_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p8 (poly8_t * __a, poly8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p16 (poly16_t * __a, poly16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s8 (int8_t * __a, int8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s16 (int16_t * __a, int16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s32 (int32_t * __a, int32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s64 (int64_t * __a, int64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f32 (float32_t * __a, float32x4_t __b)
+{
+ __builtin_neon_vst1v4sf ((__builtin_neon_sf *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u8 (uint8_t * __a, uint8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u16 (uint16_t * __a, uint16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u32 (uint32_t * __a, uint32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, (int32x4_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u64 (uint64_t * __a, uint64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p8 (poly8_t * __a, poly8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p16 (poly16_t * __a, poly16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
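+/* vst1_lane family: store to memory only the lane selected by the
+   constant index __c (NEON VST1 with a lane specifier).  */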
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s8 (int8_t * __a, int8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s16 (int16_t * __a, int16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s32 (int32_t * __a, int32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_f32 (float32_t * __a, float32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2sf ((__builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u8 (uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u16 (uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u32 (uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p8 (poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p16 (poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s64 (int64_t * __a, int64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u64 (uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s8 (int8_t * __a, int8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s16 (int16_t * __a, int16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s32 (int32_t * __a, int32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_f32 (float32_t * __a, float32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4sf ((__builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u8 (uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u16 (uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u32 (uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p8 (poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p16 (poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s64 (int64_t * __a, int64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u64 (uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
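+/* vld2 family: load 2-element structures from memory, de-interleaving
+   into two vectors (NEON VLD2).  The builtins return an opaque integer
+   mode -- 128-bit __builtin_neon_ti for doubleword forms, 256-bit
+   __builtin_neon_oi for quadword forms -- which the union reinterprets
+   as the corresponding x2 structure type.  */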
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vld2q_s8 (const int8_t * __a)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_s16 (const int16_t * __a)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_s32 (const int32_t * __a)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_f32 (const float32_t * __a)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vld2q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vld2q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
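+/* vld2_lane family: load one 2-element structure into lane __c of the
+   two vectors given by __b, leaving the other lanes unchanged.  */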
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_s8 (const int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_s16 (const int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_s32 (const int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_f32 (const float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_u8 (const uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_u16 (const uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_u32 (const uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_p8 (const poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_p16 (const poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_s16 (const int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_s32 (const int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_f32 (const float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_u16 (const uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_u32 (const uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_p16 (const poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
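+/* vld2_dup family: load one 2-element structure and replicate it to
+   all lanes of the two result vectors (NEON VLD2, all-lanes form).  */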
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
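+/* vst2 family: interleave two vectors and store them to memory as
+   2-element structures (NEON VST2); the union passes the structure
+   type to the builtin as its opaque integer mode.  */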
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s8 (int8_t * __a, int8x8x2_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s16 (int16_t * __a, int16x4x2_t __b)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s32 (int32_t * __a, int32x2x2_t __b)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f32 (float32_t * __a, float32x2x2_t __b)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u8 (uint8_t * __a, uint8x8x2_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u16 (uint16_t * __a, uint16x4x2_t __b)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u32 (uint32_t * __a, uint32x2x2_t __b)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p8 (poly8_t * __a, poly8x8x2_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p16 (poly16_t * __a, poly16x4x2_t __b)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s64 (int64_t * __a, int64x1x2_t __b)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u64 (uint64_t * __a, uint64x1x2_t __b)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s8 (int8_t * __a, int8x16x2_t __b)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s16 (int16_t * __a, int16x8x2_t __b)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s32 (int32_t * __a, int32x4x2_t __b)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f32 (float32_t * __a, float32x4x2_t __b)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u8 (uint8_t * __a, uint8x16x2_t __b)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u16 (uint16_t * __a, uint16x8x2_t __b)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u32 (uint32_t * __a, uint32x4x2_t __b)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p8 (poly8_t * __a, poly8x16x2_t __b)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p16 (poly16_t * __a, poly16x8x2_t __b)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
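+/* vst2_lane family: store the 2-element structure held in lane __c of
+   the two vectors __b.  */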
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s8 (int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s16 (int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s32 (int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_f32 (float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u8 (uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u16 (uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u32 (uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_p8 (poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_p16 (poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_s16 (int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_s32 (int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_f32 (float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_u16 (uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_u32 (uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_p16 (poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
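+/* vld3 family: load 3-element structures, de-interleaving into three
+   vectors (NEON VLD3).  Doubleword forms use the 192-bit
+   __builtin_neon_ei mode, quadword forms the 384-bit
+   __builtin_neon_ci mode.  */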
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+vld3q_s8 (const int8_t * __a)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_s16 (const int16_t * __a)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_s32 (const int32_t * __a)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_f32 (const float32_t * __a)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+vld3q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+vld3q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
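+/* vld3_lane family: load one 3-element structure into lane __c of the
+   three vectors given by __b, leaving the other lanes unchanged.  */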
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_s8 (const int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_s16 (const int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_s32 (const int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_f32 (const float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_u8 (const uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_u16 (const uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_u32 (const uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_p8 (const poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_p16 (const poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_s16 (const int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_s32 (const int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_f32 (const float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_u16 (const uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_u32 (const uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_p16 (const poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
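+/* vld3_dup family: load one 3-element structure and replicate it to
+   all lanes of the three result vectors.  */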
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
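+/* vst3 family: interleave three vectors and store them to memory as
+   3-element structures (NEON VST3).  */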
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s8 (int8_t * __a, int8x8x3_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s16 (int16_t * __a, int16x4x3_t __b)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s32 (int32_t * __a, int32x2x3_t __b)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f32 (float32_t * __a, float32x2x3_t __b)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u8 (uint8_t * __a, uint8x8x3_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u16 (uint16_t * __a, uint16x4x3_t __b)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u32 (uint32_t * __a, uint32x2x3_t __b)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p8 (poly8_t * __a, poly8x8x3_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p16 (poly16_t * __a, poly16x4x3_t __b)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s64 (int64_t * __a, int64x1x3_t __b)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u64 (uint64_t * __a, uint64x1x3_t __b)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s8 (int8_t * __a, int8x16x3_t __b)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s16 (int16_t * __a, int16x8x3_t __b)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s32 (int32_t * __a, int32x4x3_t __b)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f32 (float32_t * __a, float32x4x3_t __b)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u8 (uint8_t * __a, uint8x16x3_t __b)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u16 (uint16_t * __a, uint16x8x3_t __b)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u32 (uint32_t * __a, uint32x4x3_t __b)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p8 (poly8_t * __a, poly8x16x3_t __b)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p16 (poly16_t * __a, poly16x8x3_t __b)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
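+/* vst3_lane family: store the 3-element structure held in lane __c of
+   the three vectors __b.  */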
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s8 (int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s16 (int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s32 (int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_f32 (float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u8 (uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u16 (uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u32 (uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_p8 (poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_p16 (poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_s16 (int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_s32 (int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_f32 (float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_u16 (uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_u32 (uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_p16 (poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
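+/* vld4 family: load 4-element structures, de-interleaving into four
+   vectors (NEON VLD4).  Doubleword forms use the 256-bit
+   __builtin_neon_oi mode, quadword forms the 512-bit
+   __builtin_neon_xi mode.  */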
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+vld4q_s8 (const int8_t * __a)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_s16 (const int16_t * __a)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_s32 (const int32_t * __a)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_f32 (const float32_t * __a)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+vld4q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+vld4q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
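+/* Usage sketch (illustrative, not part of the generated header): vld4q_u8
+   loads 64 bytes and de-interleaves them, so element i of val[k] holds
+   source byte 4*i+k; this is the usual way to split interleaved RGBA
+   pixels into per-channel registers.  The helper name is assumed.  */
+static __inline void
+example_extract_red (const uint8_t * pixels, uint8_t * red)
+{
+  uint8x16x4_t px = vld4q_u8 (pixels); /* val[0]=R, val[1]=G, val[2]=B, val[3]=A */
+  vst1q_u8 (red, px.val[0]);           /* contiguous store of the 16 red bytes */
+}
+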
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_s8 (const int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_s16 (const int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_s32 (const int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_f32 (const float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_u8 (const uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_u16 (const uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_u32 (const uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_p8 (const poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_p16 (const poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_s16 (const int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_s32 (const int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_f32 (const float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_u16 (const uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_u32 (const uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_p16 (const poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
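+/* Usage sketch (illustrative, not part of the generated header): the lane
+   forms reload one element of each of the four vectors and leave the other
+   lanes of __b untouched; __c must be a compile-time constant.  Note that
+   the Q-register lane forms above exist only for 16- and 32-bit elements,
+   mirroring the VLD4 single-lane encodings.  The helper name is assumed.  */
+static __inline int16x4x4_t
+example_refresh_lane0 (const int16_t * p, int16x4x4_t acc)
+{
+  /* Replaces lane 0 of acc.val[0..3] with p[0], p[1], p[2], p[3].  */
+  return vld4_lane_s16 (p, acc, 0);
+}
+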
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
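+/* Usage sketch (illustrative, not part of the generated header): vld4_dup
+   loads four consecutive elements and broadcasts each across its own
+   vector, e.g. to splat a packed group of four filter taps.  The helper
+   name is assumed.  */
+static __inline float32x2x4_t
+example_load_taps (const float32_t * taps)
+{
+  /* val[k] holds taps[k] in both lanes, k = 0..3.  */
+  return vld4_dup_f32 (taps);
+}
+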
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s8 (int8_t * __a, int8x8x4_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s16 (int16_t * __a, int16x4x4_t __b)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s32 (int32_t * __a, int32x2x4_t __b)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f32 (float32_t * __a, float32x2x4_t __b)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u8 (uint8_t * __a, uint8x8x4_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u16 (uint16_t * __a, uint16x4x4_t __b)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u32 (uint32_t * __a, uint32x2x4_t __b)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p8 (poly8_t * __a, poly8x8x4_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p16 (poly16_t * __a, poly16x4x4_t __b)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s64 (int64_t * __a, int64x1x4_t __b)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u64 (uint64_t * __a, uint64x1x4_t __b)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s8 (int8_t * __a, int8x16x4_t __b)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s16 (int16_t * __a, int16x8x4_t __b)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s32 (int32_t * __a, int32x4x4_t __b)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f32 (float32_t * __a, float32x4x4_t __b)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u8 (uint8_t * __a, uint8x16x4_t __b)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u16 (uint16_t * __a, uint16x8x4_t __b)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u32 (uint32_t * __a, uint32x4x4_t __b)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p8 (poly8_t * __a, poly8x16x4_t __b)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p16 (poly16_t * __a, poly16x8x4_t __b)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
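+/* Usage sketch (illustrative, not part of the generated header): vst4q is
+   the inverse of vld4q, interleaving the four vectors on store, so a
+   load/modify/store round trip preserves the memory layout.  The helper
+   name is assumed.  */
+static __inline void
+example_clear_alpha (uint8_t * pixels)
+{
+  uint8x16x4_t px = vld4q_u8 (pixels);  /* de-interleave 16 RGBA pixels */
+  px.val[3] = vdupq_n_u8 (0);           /* zero the alpha channel */
+  vst4q_u8 (pixels, px);                /* re-interleave on store */
+}
+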
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s8 (int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s16 (int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s32 (int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_f32 (float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u8 (uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u16 (uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u32 (uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_p8 (poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_p16 (poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_s16 (int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_s32 (int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_f32 (float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_u16 (uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_u32 (uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_p16 (poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
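+/* Usage sketch (illustrative, not part of the generated header): the lane
+   store forms write one element from each of the four vectors, four
+   elements in all, with __c a compile-time constant as for the loads.
+   The helper name is assumed.  */
+static __inline void
+example_store_column2 (uint16_t * dst, uint16x8x4_t planes)
+{
+  /* Writes planes.val[0][2] .. planes.val[3][2] to dst[0..3].  */
+  vst4q_lane_u16 (dst, planes, 2);
+}
+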
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vand_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vandv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vand_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vandv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vand_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vandv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vand_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vandv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vand_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vandv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vand_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vandv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vand_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vanddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vand_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vanddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vandv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vandv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vandv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vandq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vandv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vandv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vandv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vandv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vandq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vandv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
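+/* Usage sketch (illustrative, not part of the generated header): vand maps
+   directly onto VAND; the trailing 1/0 constant handed to the builtin above
+   is a type tag (1 for the signed variants, 0 for the unsigned ones), not
+   an operand.  The helper name is assumed.  */
+static __inline uint8x16_t
+example_low_nibbles (uint8x16_t bytes)
+{
+  return vandq_u8 (bytes, vdupq_n_u8 (0x0f)); /* keep the low 4 bits of each byte */
+}
+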
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorr_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vorrv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorr_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vorrv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorr_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vorrv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorr_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vorrv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorr_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vorrv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorr_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vorrv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorr_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vorrdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorr_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vorrdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vorrv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vorrv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vorrv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vorrq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vorrv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vorrv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vorrv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vorrv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vorrv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+veor_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_veorv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+veor_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_veorv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+veor_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_veorv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+veor_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_veorv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+veor_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_veorv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+veor_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_veorv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+veor_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_veordi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+veor_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_veordi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_veorv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_veorv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_veorv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+veorq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_veorv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_veorv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_veorv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_veorv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+veorq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_veorv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
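+/* Usage sketch (illustrative, not part of the generated header): XORing a
+   register with itself is the idiomatic way to zero a NEON register.  The
+   helper name is assumed.  */
+static __inline int32x4_t
+example_zero_vector (int32x4_t v)
+{
+  return veorq_s32 (v, v); /* v ^ v == 0 in every lane */
+}
+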
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbic_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vbicv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbic_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vbicv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbic_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vbicv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbic_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vbicv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbic_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vbicv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbic_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vbicv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbic_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vbicdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbic_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vbicdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vbicv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vbicv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vbicv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbicq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vbicv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vbicv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vbicv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vbicv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vbicv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vornv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vornv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vornv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vornv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vornv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vornv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorn_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vorndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorn_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vorndi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vornv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vornv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vornv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vornq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vornv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vornv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vornv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vornv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vornq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vornv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
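+/* Usage sketch (illustrative, not part of the generated header): mind the
+   operand order of the complemented forms; vbic (a, b) computes a & ~b and
+   vorn (a, b) computes a | ~b, complementing the second operand.  The
+   helper name is assumed.  */
+static __inline uint32x4_t
+example_clear_flags (uint32x4_t word, uint32x4_t flags)
+{
+  return vbicq_u32 (word, flags); /* word & ~flags */
+}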
+
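+/* The vreinterpret/vreinterpretq families that follow are pure bit-casts:
+   the same 64 or 128 register bits viewed under a new element type.  They
+   compile to no instruction; see the sketch after the vreinterpret_u64
+   group below.  */
+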
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s8 (int8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s16 (int16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s32 (int32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s64 (int64x1_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_f32 (float32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u8 (uint8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u16 (uint16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u64 (uint64x1_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_p16 (poly16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s8 (int8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s16 (int16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s32 (int32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u8 (uint8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u16 (uint16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u32 (uint32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s8 (int8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s16 (int16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u8 (uint8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u16 (uint16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi ((int64x1_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s8 (int8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s16 (int16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u8 (uint8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u16 (uint16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u32 (uint32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
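+/* Usage sketch (illustrative, not part of the generated header): a bit-cast
+   from two packed floats to their raw bit pattern, e.g. for bit-level
+   inspection of IEEE 754 values.  The helper name is assumed.  */
+static __inline uint64x1_t
+example_raw_bits (float32x2_t v)
+{
+  return vreinterpret_u64_f32 (v); /* the 64 bits of v as one uint64 lane */
+}
+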
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p8 (poly8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p16 (poly16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s8 (int8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s16 (int16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s64 (int64x2_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_f32 (float32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p8 (poly8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p16 (poly16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s8 (int8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s16 (int16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s32 (int32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
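+/* Illustrative usage sketch: the vreinterpret family only relabels the
+   lane type of a register; no conversion code is emitted.  For example,
+   assuming a NEON-capable target:
+
+     uint32x2_t words = vdup_n_u32 (0x01020304);
+     uint8x8_t bytes = vreinterpret_u8_u32 (words);
+
+   "bytes" holds the same 64 bits as "words", now viewed as eight 8-bit
+   lanes. */
+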
+#ifdef __cplusplus
+}
+#endif
+#endif
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/float.h b/lib/gcc/arm-linux-androideabi/4.7/include/float.h
new file mode 100644
index 0000000..2954cc4
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/float.h
@@ -0,0 +1,278 @@
+/* Copyright (C) 2002, 2007, 2008, 2009, 2010, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 5.2.4.2.2 Characteristics of floating types <float.h>
+ */
+
+#ifndef _FLOAT_H___
+#define _FLOAT_H___
+
+/* Radix of exponent representation, b. */
+#undef FLT_RADIX
+#define FLT_RADIX __FLT_RADIX__
+
+/* Number of base-FLT_RADIX digits in the significand, p. */
+#undef FLT_MANT_DIG
+#undef DBL_MANT_DIG
+#undef LDBL_MANT_DIG
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+/* Number of decimal digits, q, such that any floating-point number with q
+ decimal digits can be rounded into a floating-point number with p radix b
+ digits and back again without change to the q decimal digits,
+
+ p * log10(b) if b is a power of 10
+ floor((p - 1) * log10(b)) otherwise
+*/
+#undef FLT_DIG
+#undef DBL_DIG
+#undef LDBL_DIG
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
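+
+/* Worked example: for IEEE single precision, b = 2 (not a power of 10)
+   and p = 24, so FLT_DIG = floor((24 - 1) * log10(2)) = floor(6.92) = 6;
+   likewise DBL_DIG = floor(52 * log10(2)) = 15 for IEEE double. */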
+
+/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */
+#undef FLT_MIN_EXP
+#undef DBL_MIN_EXP
+#undef LDBL_MIN_EXP
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+/* Minimum negative integer such that 10 raised to that power is in the
+ range of normalized floating-point numbers,
+
+ ceil(log10(b) * (emin - 1))
+*/
+#undef FLT_MIN_10_EXP
+#undef DBL_MIN_10_EXP
+#undef LDBL_MIN_10_EXP
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. */
+#undef FLT_MAX_EXP
+#undef DBL_MAX_EXP
+#undef LDBL_MAX_EXP
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+/* Maximum integer such that 10 raised to that power is in the range of
+ representable finite floating-point numbers,
+
+ floor(log10((1 - b**-p) * b**emax))
+*/
+#undef FLT_MAX_10_EXP
+#undef DBL_MAX_10_EXP
+#undef LDBL_MAX_10_EXP
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+/* Maximum representable finite floating-point number,
+
+ (1 - b**-p) * b**emax
+*/
+#undef FLT_MAX
+#undef DBL_MAX
+#undef LDBL_MAX
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+/* The difference between 1 and the least value greater than 1 that is
+ representable in the given floating point type, b**1-p. */
+#undef FLT_EPSILON
+#undef DBL_EPSILON
+#undef LDBL_EPSILON
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
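+
+/* Worked example: for IEEE single precision p = 24, so
+   FLT_EPSILON = b**(1 - p) = 2**-23, approximately 1.19e-07. */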
+
+/* Minimum normalized positive floating-point number, b**(emin - 1). */
+#undef FLT_MIN
+#undef DBL_MIN
+#undef LDBL_MIN
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */
+/* ??? This is supposed to change with calls to fesetround in <fenv.h>. */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+/* The floating-point expression evaluation method.
+ -1 indeterminate
+ 0 evaluate all operations and constants just to the range and
+ precision of the type
+ 1 evaluate operations and constants of type float and double
+ to the range and precision of the double type, evaluate
+ long double operations and constants to the range and
+ precision of the long double type
+ 2 evaluate all operations and constants to the range and
+ precision of the long double type
+
+ ??? This ought to change with the setting of the fp control word;
+ the value provided by the compiler assumes the widest setting. */
+#undef FLT_EVAL_METHOD
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+
+/* Number of decimal digits, n, such that any floating-point number in the
+ widest supported floating type with pmax radix b digits can be rounded
+ to a floating-point number with n decimal digits and back again without
+ change to the value,
+
+ pmax * log10(b) if b is a power of 10
+ ceil(1 + pmax * log10(b)) otherwise
+*/
+#undef DECIMAL_DIG
+#define DECIMAL_DIG __DECIMAL_DIG__
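+
+/* Worked example: when the widest supported type has the IEEE double
+   format (pmax = 53, as with the AAPCS long double used on this target),
+   DECIMAL_DIG = ceil(1 + 53 * log10(2)) = ceil(16.95) = 17. */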
+
+#endif /* C99 */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+/* Versions of DECIMAL_DIG for each floating-point type. */
+#undef FLT_DECIMAL_DIG
+#undef DBL_DECIMAL_DIG
+#undef LDBL_DECIMAL_DIG
+#define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+#define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+#define LDBL_DECIMAL_DIG __DECIMAL_DIG__
+
+/* Whether types support subnormal numbers. */
+#undef FLT_HAS_SUBNORM
+#undef DBL_HAS_SUBNORM
+#undef LDBL_HAS_SUBNORM
+#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+
+/* Minimum positive values, including subnormals. */
+#undef FLT_TRUE_MIN
+#undef DBL_TRUE_MIN
+#undef LDBL_TRUE_MIN
+#if __FLT_HAS_DENORM__
+#define FLT_TRUE_MIN __FLT_DENORM_MIN__
+#else
+#define FLT_TRUE_MIN __FLT_MIN__
+#endif
+#if __DBL_HAS_DENORM__
+#define DBL_TRUE_MIN __DBL_DENORM_MIN__
+#else
+#define DBL_TRUE_MIN __DBL_MIN__
+#endif
+#if __LDBL_HAS_DENORM__
+#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#else
+#define LDBL_TRUE_MIN __LDBL_MIN__
+#endif
+
+#endif /* C11 */
+
+#ifdef __STDC_WANT_DEC_FP__
+/* Draft Technical Report 24732, extension for decimal floating-point
+ arithmetic: Characteristic of decimal floating types <float.h>. */
+
+/* Number of base-FLT_RADIX digits in the significand, p. */
+#undef DEC32_MANT_DIG
+#undef DEC64_MANT_DIG
+#undef DEC128_MANT_DIG
+#define DEC32_MANT_DIG __DEC32_MANT_DIG__
+#define DEC64_MANT_DIG __DEC64_MANT_DIG__
+#define DEC128_MANT_DIG __DEC128_MANT_DIG__
+
+/* Minimum exponent. */
+#undef DEC32_MIN_EXP
+#undef DEC64_MIN_EXP
+#undef DEC128_MIN_EXP
+#define DEC32_MIN_EXP __DEC32_MIN_EXP__
+#define DEC64_MIN_EXP __DEC64_MIN_EXP__
+#define DEC128_MIN_EXP __DEC128_MIN_EXP__
+
+/* Maximum exponent. */
+#undef DEC32_MAX_EXP
+#undef DEC64_MAX_EXP
+#undef DEC128_MAX_EXP
+#define DEC32_MAX_EXP __DEC32_MAX_EXP__
+#define DEC64_MAX_EXP __DEC64_MAX_EXP__
+#define DEC128_MAX_EXP __DEC128_MAX_EXP__
+
+/* Maximum representable finite decimal floating-point number
+ (there are 6, 15, and 33 9s after the decimal points respectively). */
+#undef DEC32_MAX
+#undef DEC64_MAX
+#undef DEC128_MAX
+#define DEC32_MAX __DEC32_MAX__
+#define DEC64_MAX __DEC64_MAX__
+#define DEC128_MAX __DEC128_MAX__
+
+/* The difference between 1 and the least value greater than 1 that is
+ representable in the given floating point type. */
+#undef DEC32_EPSILON
+#undef DEC64_EPSILON
+#undef DEC128_EPSILON
+#define DEC32_EPSILON __DEC32_EPSILON__
+#define DEC64_EPSILON __DEC64_EPSILON__
+#define DEC128_EPSILON __DEC128_EPSILON__
+
+/* Minimum normalized positive floating-point number. */
+#undef DEC32_MIN
+#undef DEC64_MIN
+#undef DEC128_MIN
+#define DEC32_MIN __DEC32_MIN__
+#define DEC64_MIN __DEC64_MIN__
+#define DEC128_MIN __DEC128_MIN__
+
+/* Minimum subnormal positive floating-point number. */
+#undef DEC32_SUBNORMAL_MIN
+#undef DEC64_SUBNORMAL_MIN
+#undef DEC128_SUBNORMAL_MIN
+#define DEC32_SUBNORMAL_MIN __DEC32_SUBNORMAL_MIN__
+#define DEC64_SUBNORMAL_MIN __DEC64_SUBNORMAL_MIN__
+#define DEC128_SUBNORMAL_MIN __DEC128_SUBNORMAL_MIN__
+
+/* The floating-point expression evaluation method.
+ -1 indeterminate
+ 0 evaluate all operations and constants just to the range and
+ precision of the type
+ 1 evaluate operations and constants of type _Decimal32
+ and _Decimal64 to the range and precision of the _Decimal64
+ type, evaluate _Decimal128 operations and constants to the
+ range and precision of the _Decimal128 type;
+ 2 evaluate all operations and constants to the range and
+ precision of the _Decimal128 type. */
+
+#undef DEC_EVAL_METHOD
+#define DEC_EVAL_METHOD __DEC_EVAL_METHOD__
+
+#endif /* __STDC_WANT_DEC_FP__ */
+
+#endif /* _FLOAT_H___ */
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/iso646.h b/lib/gcc/arm-linux-androideabi/4.7/include/iso646.h
new file mode 100644
index 0000000..28ff9d1
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/iso646.h
@@ -0,0 +1,45 @@
+/* Copyright (C) 1997, 1999, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.9 Alternative spellings <iso646.h>
+ */
+
+#ifndef _ISO646_H
+#define _ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
+
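+/* Illustrative usage sketch: with this header included, C code may spell
+   operators out, e.g.
+
+     if (p not_eq 0 and *p bitand 0x80)
+       handle_high_bit ();
+
+   which the preprocessor expands to (p != 0 && *p & 0x80). */
+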
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/mmintrin.h b/lib/gcc/arm-linux-androideabi/4.7/include/mmintrin.h
new file mode 100644
index 0000000..2cc500d
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/mmintrin.h
@@ -0,0 +1,1254 @@
+/* Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+/* The data type intended for user use. */
+typedef unsigned long long __m64, __int64;
+
+/* Internal data types for implementing the intrinsics. */
+typedef int __v2si __attribute__ ((vector_size (8)));
+typedef short __v4hi __attribute__ ((vector_size (8)));
+typedef char __v8qi __attribute__ ((vector_size (8)));
+
+/* "Convert" __m64 and __int64 into each other. */
+static __inline __m64
+_mm_cvtsi64_m64 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtm64_si64 (__m64 __i)
+{
+ return __i;
+}
+
+static __inline int
+_mm_cvtsi64_si32 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtsi32_si64 (int __i)
+{
+ return __i;
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
+ the 64-bit value from M2 into the upper 32-bits of the result, all with
+ signed saturation for values that do not fit exactly into 32-bits. */
+static __inline __m64
+_mm_packs_pi64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdss ((long long)__m1, (long long)__m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
+ the 64-bit value from M2 into the upper 32-bits of the result, all with
+ unsigned saturation for values that do not fit exactly into 32-bits. */
+static __inline __m64
+_mm_packs_pu64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdus ((long long)__m1, (long long)__m2);
+}
+
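+/* Illustrative sketch of the saturating behaviour: packing the signed
+   16-bit lanes {1000, -1000, 42, -1} with _mm_packs_pi16 yields the 8-bit
+   lanes {127, -128, 42, -1} for that half of the result, since 1000 and
+   -1000 exceed the signed 8-bit range. */
+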
+/* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Take the four 8-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuw ((__v2si)__m1);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+static __inline __m64
+_mm_add_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+static __inline __m64
+_mm_add_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+static __inline __m64
+_mm_add_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwus ((__v2si)__m1, (__v2si)__m2);
+}
+
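+/* Illustrative sketch: with _mm_adds_pi8, a lane computing 0x70 + 0x70
+   saturates to 0x7F (127) instead of wrapping to 0xE0 (-32); with the
+   plain _mm_add_pi8 the same lane would wrap. */
+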
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+static __inline __m64
+_mm_sub_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+static __inline __m64
+_mm_sub_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+static __inline __m64
+_mm_sub_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+ four 32-bit intermediate results, which are then summed by pairs to
+ produce two 32-bit results. */
+static __inline __m64
+_mm_madd_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmadds ((__v4hi)__m1, (__v4hi)__m2);
+}
+
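+/* Illustrative sketch: for lanes M1 = {a0, a1, a2, a3} and
+   M2 = {b0, b1, b2, b3}, _mm_madd_pi16 returns the two 32-bit values
+   {a0*b0 + a1*b1, a2*b2 + a3*b3}. */
+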
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values
+   in M2, producing four 32-bit intermediate results, which are then summed
+   by pairs to produce two 32-bit results. */
+static __inline __m64
+_mm_madd_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmaddu ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+static __inline __m64
+_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulsm ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values
+   in M2 and produce the high 16 bits of the 32-bit results. */
+static __inline __m64
+_mm_mulhi_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulum ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+static __inline __m64
+_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulul ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Shift four 16-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+static __inline __m64
+_mm_sll_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wslld (__m, __count);
+}
+
+static __inline __m64
+_mm_slli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wslldi (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrah ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrahi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsraw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrawi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrad (__m, __count);
+}
+
+static __inline __m64
+_mm_srai_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsradi (__m, __count);
+}
+
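+/* Illustrative sketch: the _mm_sra* forms replicate the sign bit, so a
+   16-bit lane holding -4 shifted right by 1 with _mm_srai_pi16 yields -2,
+   whereas the logical _mm_srl* forms below shift in zeros and would give
+   0x7FFE. */
+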
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrld (__m, __count);
+}
+
+static __inline __m64
+_mm_srli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrldi (__m, __count);
+}
+
+/* Rotate four 16-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorhi ((__v4hi)__m, __count);
+}
+
+/* Rotate two 32-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorwi ((__v2si)__m, __count);
+}
+
+/* Rotate the 64-bit value in M right by COUNT. */
+static __inline __m64
+_mm_ror_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrord (__m, __count);
+}
+
+static __inline __m64
+_mm_rori_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrordi (__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_and_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wand (__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+static __inline __m64
+_mm_andnot_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wandn (__m1, __m2);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_or_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wor (__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_xor_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wxor (__m1, __m2);
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtub ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Element-wise multiplication of the unsigned 16-bit values in __B and __C,
+   followed by accumulation of all four products together with __A. */
+static __inline __m64
+_mm_mac_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacu (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of the signed 16-bit values in __B and __C,
+   followed by accumulation of all four products together with __A. */
+static __inline __m64
+_mm_mac_pi16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacs (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of the unsigned 16-bit values in __A and __B,
+   followed by accumulation of all four products. */
+static __inline __m64
+_mm_macz_pu16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacuz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Element-wise multiplication of the signed 16-bit values in __A and __B,
+   followed by accumulation of all four products. */
+static __inline __m64
+_mm_macz_pi16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacsz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Accumulate across all unsigned 8-bit values in __A. */
+static __inline __m64
+_mm_acc_pu8 (__m64 __A)
+{
+ return __builtin_arm_waccb ((__v8qi)__A);
+}
+
+/* Accumulate across all unsigned 16-bit values in __A. */
+static __inline __m64
+_mm_acc_pu16 (__m64 __A)
+{
+ return __builtin_arm_wacch ((__v4hi)__A);
+}
+
+/* Accumulate across all unsigned 32-bit values in __A. */
+static __inline __m64
+_mm_acc_pu32 (__m64 __A)
+{
+ return __builtin_arm_waccw ((__v2si)__A);
+}
+
+static __inline __m64
+_mm_mia_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmia (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miaph_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiaph (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabt (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatt (__A, __B, __C);
+}
+
+/* Extract one of the elements of A and sign extend. The selector N must
+ be immediate. */
+#define _mm_extract_pi8(A, N) __builtin_arm_textrmsb ((__v8qi)(A), (N))
+#define _mm_extract_pi16(A, N) __builtin_arm_textrmsh ((__v4hi)(A), (N))
+#define _mm_extract_pi32(A, N) __builtin_arm_textrmsw ((__v2si)(A), (N))
+
+/* Extract one of the elements of A and zero extend. The selector N must
+ be immediate. */
+#define _mm_extract_pu8(A, N) __builtin_arm_textrmub ((__v8qi)(A), (N))
+#define _mm_extract_pu16(A, N) __builtin_arm_textrmuh ((__v4hi)(A), (N))
+#define _mm_extract_pu32(A, N) __builtin_arm_textrmuw ((__v2si)(A), (N))
+
+/* Insert word D into one of the elements of A. The selector N must be
+   immediate. */
+#define _mm_insert_pi8(A, D, N) \
+ ((__m64) __builtin_arm_tinsrb ((__v8qi)(A), (D), (N)))
+#define _mm_insert_pi16(A, D, N) \
+ ((__m64) __builtin_arm_tinsrh ((__v4hi)(A), (D), (N)))
+#define _mm_insert_pi32(A, D, N) \
+ ((__m64) __builtin_arm_tinsrw ((__v2si)(A), (D), (N)))
+
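+/* Illustrative usage sketch; the selector must be a compile-time
+   constant:
+
+     int hi = _mm_extract_pi16 (v, 3);      -- lane 3, sign extended
+     v = _mm_insert_pi16 (v, 0x1234, 0);    -- replace lane 0
+*/
+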
+/* Compute the element-wise maximum of signed 8-bit values. */
+static __inline __m64
+_mm_max_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of signed 16-bit values. */
+static __inline __m64
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of signed 32-bit values. */
+static __inline __m64
+_mm_max_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values. */
+static __inline __m64
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 16-bit values. */
+static __inline __m64
+_mm_max_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 32-bit values. */
+static __inline __m64
+_mm_max_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of signed 8-bit values.  */
+static __inline __m64
+_mm_min_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values. */
+static __inline __m64
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of signed 32-bit values. */
+static __inline __m64
+_mm_min_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values.  */
+static __inline __m64
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 16-bit values. */
+static __inline __m64
+_mm_min_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 32-bit values. */
+static __inline __m64
+_mm_min_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values. */
+static __inline int
+_mm_movemask_pi8 (__m64 __A)
+{
+ return __builtin_arm_tmovmskb ((__v8qi)__A);
+}
+
+/* Create a 4-bit mask of the signs of 16-bit values.  */
+static __inline int
+_mm_movemask_pi16 (__m64 __A)
+{
+ return __builtin_arm_tmovmskh ((__v4hi)__A);
+}
+
+/* Create a 2-bit mask of the signs of 32-bit values.  */
+static __inline int
+_mm_movemask_pi32 (__m64 __A)
+{
+ return __builtin_arm_tmovmskw ((__v2si)__A);
+}
+
+/* Return a combination of the four 16-bit values in A. The selector
+ must be an immediate. */
+#define _mm_shuffle_pi16(A, N) \
+ ((__m64) __builtin_arm_wshufh ((__v4hi)(A), (N)))
+
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2br ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2hr ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2b ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2h ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
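Together with the extract macros above, the SAD intrinsics cover the inner loop of block matching. A hedged sketch built only from definitions in this header (hypothetical function name):

    #include <mmintrin.h>

    /* Sum of absolute differences over one 8-byte block, as used in
       motion estimation; the zeroing variant leaves the sum in the
       low 16-bit word.  */
    unsigned int
    block_sad (__m64 a, __m64 b)
    {
      __m64 s = _mm_sadz_pu8 (a, b);
      return (unsigned int) _mm_extract_pu16 (s, 0);
    }
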
+static __inline __m64
+_mm_align_si64 (__m64 __A, __m64 __B, int __C)
+{
+ return (__m64) __builtin_arm_walign ((__v8qi)__A, (__v8qi)__B, __C);
+}
+
+/* Creates a 64-bit zero. */
+static __inline __m64
+_mm_setzero_si64 (void)
+{
+ return __builtin_arm_wzero ();
+}
+
+/* Set and get arbitrary iWMMXt control registers.
+   Note that only registers 0-3 and 8-11 are currently defined;
+   the rest are reserved.  */
+
+static __inline void
+_mm_setwcx (const int __value, const int __regno)
+{
+ switch (__regno)
+ {
+ case 0: __builtin_arm_setwcx (__value, 0); break;
+ case 1: __builtin_arm_setwcx (__value, 1); break;
+ case 2: __builtin_arm_setwcx (__value, 2); break;
+ case 3: __builtin_arm_setwcx (__value, 3); break;
+ case 8: __builtin_arm_setwcx (__value, 8); break;
+ case 9: __builtin_arm_setwcx (__value, 9); break;
+ case 10: __builtin_arm_setwcx (__value, 10); break;
+ case 11: __builtin_arm_setwcx (__value, 11); break;
+ default: break;
+ }
+}
+
+static __inline int
+_mm_getwcx (const int __regno)
+{
+ switch (__regno)
+ {
+ case 0: return __builtin_arm_getwcx (0);
+ case 1: return __builtin_arm_getwcx (1);
+ case 2: return __builtin_arm_getwcx (2);
+ case 3: return __builtin_arm_getwcx (3);
+ case 8: return __builtin_arm_getwcx (8);
+ case 9: return __builtin_arm_getwcx (9);
+ case 10: return __builtin_arm_getwcx (10);
+ case 11: return __builtin_arm_getwcx (11);
+ default: return 0;
+ }
+}
+
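A usage sketch for the accessor pair. The register roles named in the comments (wCID as register 0, wCSSF as register 2) follow Intel's iWMMXt documentation and are assumptions here, not something this header defines:

    /* Clear the saturation status flags (wCSSF, assumed register 2),
       then read the coprocessor ID (wCID, assumed register 0).  */
    int
    probe_coprocessor (void)
    {
      _mm_setwcx (0, 2);
      return _mm_getwcx (0);
    }
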
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+static __inline __m64
+_mm_set_pi32 (int __i1, int __i0)
+{
+ union {
+ __m64 __q;
+ struct {
+ unsigned int __i0;
+ unsigned int __i1;
+ } __s;
+ } __u;
+
+ __u.__s.__i0 = __i0;
+ __u.__s.__i1 = __i1;
+
+ return __u.__q;
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+static __inline __m64
+_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+{
+ unsigned int __i1 = (unsigned short)__w3 << 16 | (unsigned short)__w2;
+ unsigned int __i0 = (unsigned short)__w1 << 16 | (unsigned short)__w0;
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+static __inline __m64
+_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+ char __b3, char __b2, char __b1, char __b0)
+{
+ unsigned int __i1, __i0;
+
+ __i1 = (unsigned char)__b7;
+ __i1 = __i1 << 8 | (unsigned char)__b6;
+ __i1 = __i1 << 8 | (unsigned char)__b5;
+ __i1 = __i1 << 8 | (unsigned char)__b4;
+
+ __i0 = (unsigned char)__b3;
+ __i0 = __i0 << 8 | (unsigned char)__b2;
+ __i0 = __i0 << 8 | (unsigned char)__b1;
+ __i0 = __i0 << 8 | (unsigned char)__b0;
+
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Similar, but with the arguments in reverse order. */
+static __inline __m64
+_mm_setr_pi32 (int __i0, int __i1)
+{
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+static __inline __m64
+_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16 (__w3, __w2, __w1, __w0);
+}
+
+static __inline __m64
+_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+ char __b4, char __b5, char __b6, char __b7)
+{
+ return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+static __inline __m64
+_mm_set1_pi32 (int __i)
+{
+ return _mm_set_pi32 (__i, __i);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+static __inline __m64
+_mm_set1_pi16 (short __w)
+{
+ unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w;
+ return _mm_set1_pi32 (__i);
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B.  */
+static __inline __m64
+_mm_set1_pi8 (char __b)
+{
+ unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b;
+ unsigned int __i = __w << 16 | __w;
+ return _mm_set1_pi32 (__i);
+}
+
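A small sketch combining the splat constructor with saturating addition; _mm_adds_pu8 is assumed from the earlier, unshown part of this header (it is aliased as _m_paddusb below):

    #include <mmintrin.h>

    /* Brighten eight packed pixels at once; unsigned saturation
       clamps at 255 instead of wrapping.  */
    __m64
    brighten (__m64 pixels, unsigned char delta)
    {
      return _mm_adds_pu8 (pixels, _mm_set1_pi8 ((char) delta));
    }
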
+/* Convert an integer to a __m64 object. */
+static __inline __m64
+_m_from_int (int __a)
+{
+ return (__m64)__a;
+}
+
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_packusdw _mm_packs_pu32
+#define _m_packssqd _mm_packs_pi64
+#define _m_packusqd _mm_packs_pu64
+#define _mm_packs_si64 _mm_packs_pi64
+#define _mm_packs_su64 _mm_packs_pu64
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_punpckehsbw _mm_unpackeh_pi8
+#define _m_punpckehswd _mm_unpackeh_pi16
+#define _m_punpckehsdq _mm_unpackeh_pi32
+#define _m_punpckehubw _mm_unpackeh_pu8
+#define _m_punpckehuwd _mm_unpackeh_pu16
+#define _m_punpckehudq _mm_unpackeh_pu32
+#define _m_punpckelsbw _mm_unpackel_pi8
+#define _m_punpckelswd _mm_unpackel_pi16
+#define _m_punpckelsdq _mm_unpackel_pi32
+#define _m_punpckelubw _mm_unpackel_pu8
+#define _m_punpckeluwd _mm_unpackel_pu16
+#define _m_punpckeludq _mm_unpackel_pu32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddsd _mm_adds_pi32
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_paddusd _mm_adds_pu32
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubuw _mm_subs_pi32
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_psubusd _mm_subs_pu32
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmadduwd _mm_madd_pu16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_pmacsw _mm_mac_pi16
+#define _m_pmacuw _mm_mac_pu16
+#define _m_pmacszw _mm_macz_pi16
+#define _m_pmacuzw _mm_macz_pu16
+#define _m_paccb _mm_acc_pu8
+#define _m_paccw _mm_acc_pu16
+#define _m_paccd _mm_acc_pu32
+#define _m_pmia _mm_mia_si64
+#define _m_pmiaph _mm_miaph_si64
+#define _m_pmiabb _mm_miabb_si64
+#define _m_pmiabt _mm_miabt_si64
+#define _m_pmiatb _mm_miatb_si64
+#define _m_pmiatt _mm_miatt_si64
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psraq _mm_sra_si64
+#define _m_psraqi _mm_srai_si64
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_prorw _mm_ror_pi16
+#define _m_prorwi _mm_rori_pi16
+#define _m_prord _mm_ror_pi32
+#define _m_prordi _mm_rori_pi32
+#define _m_prorq _mm_ror_si64
+#define _m_prorqi _mm_rori_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtub _mm_cmpgt_pu8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtuw _mm_cmpgt_pu16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+#define _m_pcmpgtud _mm_cmpgt_pu32
+#define _m_pextrb _mm_extract_pi8
+#define _m_pextrw _mm_extract_pi16
+#define _m_pextrd _mm_extract_pi32
+#define _m_pextrub _mm_extract_pu8
+#define _m_pextruw _mm_extract_pu16
+#define _m_pextrud _mm_extract_pu32
+#define _m_pinsrb _mm_insert_pi8
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pinsrd _mm_insert_pi32
+#define _m_pmaxsb _mm_max_pi8
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxsd _mm_max_pi32
+#define _m_pmaxub _mm_max_pu8
+#define _m_pmaxuw _mm_max_pu16
+#define _m_pmaxud _mm_max_pu32
+#define _m_pminsb _mm_min_pi8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminsd _mm_min_pi32
+#define _m_pminub _mm_min_pu8
+#define _m_pminuw _mm_min_pu16
+#define _m_pminud _mm_min_pu32
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmovmskw _mm_movemask_pi16
+#define _m_pmovmskd _mm_movemask_pi32
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_pavg2b _mm_avg2_pu8
+#define _m_pavg2w _mm_avg2_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_psadwd _mm_sad_pu16
+#define _m_psadzbw _mm_sadz_pu8
+#define _m_psadzwd _mm_sadz_pu16
+#define _m_paligniq _mm_align_si64
+#define _m_cvt_si2pi _mm_cvtsi64_m64
+#define _m_cvt_pi2si _mm_cvtm64_si64
+
+#endif /* _MMINTRIN_H_INCLUDED */
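To close out the header, a hedged end-to-end sketch of the rounded-average intrinsic; the function name is hypothetical, and it assumes n is a multiple of 8 and that the buffers are 8-byte aligned so they may be accessed as __m64:

    #include <mmintrin.h>

    /* Rounded per-byte average of two rows, eight pixels per step.  */
    void
    avg_rows (const unsigned char *a, const unsigned char *b,
              unsigned char *dst, int n)
    {
      int i;
      for (i = 0; i < n; i += 8)
        {
          __m64 va = *(const __m64 *) (a + i);
          __m64 vb = *(const __m64 *) (b + i);
          *(__m64 *) (dst + i) = _mm_avg_pu8 (va, vb);
        }
    }
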
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stdalign.h b/lib/gcc/arm-linux-androideabi/4.7/include/stdalign.h
new file mode 100644
index 0000000..fd55ed3
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stdalign.h
@@ -0,0 +1,39 @@
+/* Copyright (C) 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C1X: 7.15 Alignment <stdalign.h>. */
+
+#ifndef _STDALIGN_H
+#define _STDALIGN_H
+
+#ifndef __cplusplus
+
+#define alignas _Alignas
+#define alignof _Alignof
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#endif
+
+#endif /* stdalign.h */
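A minimal C11 sketch of the two macros (hypothetical buffer name, assuming compilation with -std=c11):

    #include <stdalign.h>
    #include <stdio.h>

    /* Pin a buffer to a 64-byte boundary, e.g. for a DMA descriptor.  */
    alignas (64) static unsigned char dma_buf[256];

    int
    main (void)
    {
      printf ("double aligns to %zu\n", alignof (double));
      printf ("dma_buf mod 64 = %lu\n",
              (unsigned long) dma_buf % 64);  /* prints 0 */
      return 0;
    }
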
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stdarg.h b/lib/gcc/arm-linux-androideabi/4.7/include/stdarg.h
new file mode 100644
index 0000000..54dc2e7
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stdarg.h
@@ -0,0 +1,130 @@
+/* Copyright (C) 1989, 1997, 1998, 1999, 2000, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.15 Variable arguments <stdarg.h>
+ */
+
+#ifndef _STDARG_H
+#ifndef _ANSI_STDARG_H_
+#ifndef __need___va_list
+#define _STDARG_H
+#define _ANSI_STDARG_H_
+#endif /* not __need___va_list */
+#undef __need___va_list
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef __builtin_va_list __gnuc_va_list;
+#endif
+
+/* Define the standard macros for the user,
+ if this invocation was from the user program. */
+#ifdef _STDARG_H
+
+#define va_start(v,l) __builtin_va_start(v,l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v,l) __builtin_va_arg(v,l)
+#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L || defined(__GXX_EXPERIMENTAL_CXX0X__)
+#define va_copy(d,s) __builtin_va_copy(d,s)
+#endif
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+/* Define va_list, if desired, from __gnuc_va_list. */
+/* We deliberately do not define va_list when called from
+ stdio.h, because ANSI C says that stdio.h is not supposed to define
+ va_list. stdio.h needs to have access to that data type,
+ but must not use that name. It should use the name __gnuc_va_list,
+ which is safe because it is reserved for the implementation. */
+
+#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */
+#undef _VA_LIST
+#endif
+
+#ifdef _BSD_VA_LIST
+#undef _BSD_VA_LIST
+#endif
+
+#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST))
+/* SVR4.2 uses _VA_LIST for an internal alias for va_list,
+ so we must avoid testing it and setting it here.
+ SVR4 uses _VA_LIST as a flag in stdarg.h, but we should
+ have no conflict with that. */
+#ifndef _VA_LIST_
+#define _VA_LIST_
+#ifdef __i860__
+#ifndef _VA_LIST
+#define _VA_LIST va_list
+#endif
+#endif /* __i860__ */
+typedef __gnuc_va_list va_list;
+#ifdef _SCO_DS
+#define __VA_LIST
+#endif
+#endif /* _VA_LIST_ */
+#else /* not __svr4__ || _SCO_DS */
+
+/* The macro _VA_LIST_ is the same thing used by this file in Ultrix.
+ But on BSD NET2 we must not test or define or undef it.
+ (Note that the comments in NET 2's ansi.h
+ are incorrect for _VA_LIST_--see stdio.h!) */
+#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT)
+/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */
+#ifndef _VA_LIST_DEFINED
+/* The macro _VA_LIST is used in SCO Unix 3.2. */
+#ifndef _VA_LIST
+/* The macro _VA_LIST_T_H is used in the Bull dpx2 */
+#ifndef _VA_LIST_T_H
+/* The macro __va_list__ is used by BeOS. */
+#ifndef __va_list__
+typedef __gnuc_va_list va_list;
+#endif /* not __va_list__ */
+#endif /* not _VA_LIST_T_H */
+#endif /* not _VA_LIST */
+#endif /* not _VA_LIST_DEFINED */
+#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__))
+#define _VA_LIST_
+#endif
+#ifndef _VA_LIST
+#define _VA_LIST
+#endif
+#ifndef _VA_LIST_DEFINED
+#define _VA_LIST_DEFINED
+#endif
+#ifndef _VA_LIST_T_H
+#define _VA_LIST_T_H
+#endif
+#ifndef __va_list__
+#define __va_list__
+#endif
+
+#endif /* not _VA_LIST_, except on certain systems */
+
+#endif /* not __svr4__ */
+
+#endif /* _STDARG_H */
+
+#endif /* not _ANSI_STDARG_H_ */
+#endif /* not _STDARG_H */
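The macros reduce to the usual va_list protocol: va_start, repeated va_arg, va_end (plus va_copy for a second pass). A minimal, self-contained sketch:

    #include <stdarg.h>

    /* Sum `count` ints passed variadically, e.g.
       sum_ints (3, 1, 2, 3) == 6.  */
    int
    sum_ints (int count, ...)
    {
      va_list ap;
      int i, total = 0;

      va_start (ap, count);
      for (i = 0; i < count; i++)
        total += va_arg (ap, int);
      va_end (ap);
      return total;
    }
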
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stdbool.h b/lib/gcc/arm-linux-androideabi/4.7/include/stdbool.h
new file mode 100644
index 0000000..4ed911f
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stdbool.h
@@ -0,0 +1,50 @@
+/* Copyright (C) 1998, 1999, 2000, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.16 Boolean type and values <stdbool.h>
+ */
+
+#ifndef _STDBOOL_H
+#define _STDBOOL_H
+
+#ifndef __cplusplus
+
+#define bool _Bool
+#define true 1
+#define false 0
+
+#else /* __cplusplus */
+
+/* Supporting <stdbool.h> in C++ is a GCC extension. */
+#define _Bool bool
+#define bool bool
+#define false false
+#define true true
+
+#endif /* __cplusplus */
+
+/* Signal that all the definitions are present. */
+#define __bool_true_false_are_defined 1
+
+#endif /* stdbool.h */
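On the C side bool is _Bool, so any scalar condition is normalized to 0 or 1. A one-function sketch:

    #include <stdbool.h>

    /* True iff x has exactly one bit set (0 is not a power of two).  */
    bool
    is_power_of_two (unsigned int x)
    {
      return x && !(x & (x - 1));
    }
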
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stddef.h b/lib/gcc/arm-linux-androideabi/4.7/include/stddef.h
new file mode 100644
index 0000000..8a03948
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stddef.h
@@ -0,0 +1,433 @@
+/* Copyright (C) 1989, 1997, 1998, 1999, 2000, 2002, 2004, 2009, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.17 Common definitions <stddef.h>
+ */
+#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \
+ && !defined(__STDDEF_H__)) \
+ || defined(__need_wchar_t) || defined(__need_size_t) \
+ || defined(__need_ptrdiff_t) || defined(__need_NULL) \
+ || defined(__need_wint_t)
+
+/* Any one of these symbols __need_* means that GNU libc
+ wants us just to define one data type. So don't define
+ the symbols that indicate this file's entire job has been done. */
+#if (!defined(__need_wchar_t) && !defined(__need_size_t) \
+ && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \
+ && !defined(__need_wint_t))
+#define _STDDEF_H
+#define _STDDEF_H_
+/* snaroff@next.com says the NeXT needs this. */
+#define _ANSI_STDDEF_H
+/* Irix 5.1 needs this. */
+#define __STDDEF_H__
+#endif
+
+#ifndef __sys_stdtypes_h
+/* This avoids lossage on SunOS but only if stdtypes.h comes first.
+ There's no way to win with the other order! Sun lossage. */
+
+/* On 4.3bsd-net2, make sure ansi.h is included, so we have
+ one less case to deal with in the following. */
+#if defined (__BSD_NET2__) || defined (____386BSD____) || (defined (__FreeBSD__) && (__FreeBSD__ < 5)) || defined(__NetBSD__)
+#include <machine/ansi.h>
+#endif
+/* On FreeBSD 5, machine/ansi.h does not exist anymore... */
+#if defined (__FreeBSD__) && (__FreeBSD__ >= 5)
+#include <sys/_types.h>
+#endif
+
+/* In 4.3bsd-net2, machine/ansi.h defines these symbols, which are
+ defined if the corresponding type is *not* defined.
+ FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_.
+ NetBSD defines _I386_ANSI_H_ and _X86_64_ANSI_H_ instead of _ANSI_H_ */
+#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_)
+#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_)
+#define _SIZE_T
+#endif
+#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_)
+#define _PTRDIFF_T
+#endif
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_. */
+#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_)
+#ifndef _BSD_WCHAR_T_
+#define _WCHAR_T
+#endif
+#endif
+/* Undef _FOO_T_ if we are supposed to define foo_t. */
+#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_)
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#if defined (__need_size_t) || defined (_STDDEF_H_)
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#if defined (__need_wchar_t) || defined (_STDDEF_H_)
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) */
+
+/* Sequent's header files use _PTRDIFF_T_ in some conflicting way.
+ Just ignore it. */
+#if defined (__sequent__) && defined (_PTRDIFF_T_)
+#undef _PTRDIFF_T_
+#endif
+
+/* On VxWorks, <type/vxTypesBase.h> may have defined macros like
+ _TYPE_size_t which will typedef size_t. fixincludes patched the
+ vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
+ not defined, and so that defining this macro defines _GCC_SIZE_T.
+ If we find that the macros are still defined at this point, we must
+ invoke them so that the type is defined as expected. */
+#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_))
+_TYPE_ptrdiff_t;
+#undef _TYPE_ptrdiff_t
+#endif
+#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_))
+_TYPE_size_t;
+#undef _TYPE_size_t
+#endif
+#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_))
+_TYPE_wchar_t;
+#undef _TYPE_wchar_t
+#endif
+
+/* In case nobody has defined these types, but we aren't running under
+ GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
+   __WCHAR_TYPE__ have reasonable values.  This can happen if parts
+   of GCC are compiled by an older compiler that actually includes
+   gstddef.h, such as collect2.  */
+
+/* Signed type of difference of two pointers. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_ptrdiff_t)
+#ifndef _PTRDIFF_T /* in case <sys/types.h> has defined it. */
+#ifndef _T_PTRDIFF_
+#ifndef _T_PTRDIFF
+#ifndef __PTRDIFF_T
+#ifndef _PTRDIFF_T_
+#ifndef _BSD_PTRDIFF_T_
+#ifndef ___int_ptrdiff_t_h
+#ifndef _GCC_PTRDIFF_T
+#define _PTRDIFF_T
+#define _T_PTRDIFF_
+#define _T_PTRDIFF
+#define __PTRDIFF_T
+#define _PTRDIFF_T_
+#define _BSD_PTRDIFF_T_
+#define ___int_ptrdiff_t_h
+#define _GCC_PTRDIFF_T
+#ifndef __PTRDIFF_TYPE__
+#define __PTRDIFF_TYPE__ long int
+#endif
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#endif /* _GCC_PTRDIFF_T */
+#endif /* ___int_ptrdiff_t_h */
+#endif /* _BSD_PTRDIFF_T_ */
+#endif /* _PTRDIFF_T_ */
+#endif /* __PTRDIFF_T */
+#endif /* _T_PTRDIFF */
+#endif /* _T_PTRDIFF_ */
+#endif /* _PTRDIFF_T */
+
+/* If this symbol has done its job, get rid of it. */
+#undef __need_ptrdiff_t
+
+#endif /* _STDDEF_H or __need_ptrdiff_t. */
+
+/* Unsigned type of `sizeof' something. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_size_t)
+#ifndef __size_t__ /* BeOS */
+#ifndef __SIZE_T__ /* Cray Unicos/Mk */
+#ifndef _SIZE_T /* in case <sys/types.h> has defined it. */
+#ifndef _SYS_SIZE_T_H
+#ifndef _T_SIZE_
+#ifndef _T_SIZE
+#ifndef __SIZE_T
+#ifndef _SIZE_T_
+#ifndef _BSD_SIZE_T_
+#ifndef _SIZE_T_DEFINED_
+#ifndef _SIZE_T_DEFINED
+#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */
+#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */
+#ifndef ___int_size_t_h
+#ifndef _GCC_SIZE_T
+#ifndef _SIZET_
+#ifndef __size_t
+#define __size_t__ /* BeOS */
+#define __SIZE_T__ /* Cray Unicos/Mk */
+#define _SIZE_T
+#define _SYS_SIZE_T_H
+#define _T_SIZE_
+#define _T_SIZE
+#define __SIZE_T
+#define _SIZE_T_
+#define _BSD_SIZE_T_
+#define _SIZE_T_DEFINED_
+#define _SIZE_T_DEFINED
+#define _BSD_SIZE_T_DEFINED_ /* Darwin */
+#define _SIZE_T_DECLARED /* FreeBSD 5 */
+#define ___int_size_t_h
+#define _GCC_SIZE_T
+#define _SIZET_
+#if (defined (__FreeBSD__) && (__FreeBSD__ >= 5)) \
+ || defined(__FreeBSD_kernel__)
+/* __size_t is a typedef on FreeBSD 5, must not trash it. */
+#else
+#define __size_t
+#endif
+#ifndef __SIZE_TYPE__
+#define __SIZE_TYPE__ long unsigned int
+#endif
+#if !(defined (__GNUG__) && defined (size_t))
+typedef __SIZE_TYPE__ size_t;
+#ifdef __BEOS__
+typedef long ssize_t;
+#endif /* __BEOS__ */
+#endif /* !(defined (__GNUG__) && defined (size_t)) */
+#endif /* __size_t */
+#endif /* _SIZET_ */
+#endif /* _GCC_SIZE_T */
+#endif /* ___int_size_t_h */
+#endif /* _SIZE_T_DECLARED */
+#endif /* _BSD_SIZE_T_DEFINED_ */
+#endif /* _SIZE_T_DEFINED */
+#endif /* _SIZE_T_DEFINED_ */
+#endif /* _BSD_SIZE_T_ */
+#endif /* _SIZE_T_ */
+#endif /* __SIZE_T */
+#endif /* _T_SIZE */
+#endif /* _T_SIZE_ */
+#endif /* _SYS_SIZE_T_H */
+#endif /* _SIZE_T */
+#endif /* __SIZE_T__ */
+#endif /* __size_t__ */
+#undef __need_size_t
+#endif /* _STDDEF_H or __need_size_t. */
+
+
+/* Wide character type.
+ Locale-writers should change this as necessary to
+ be big enough to hold unique values not between 0 and 127,
+ and not (wchar_t) -1, for each defined multibyte character. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_wchar_t)
+#ifndef __wchar_t__ /* BeOS */
+#ifndef __WCHAR_T__ /* Cray Unicos/Mk */
+#ifndef _WCHAR_T
+#ifndef _T_WCHAR_
+#ifndef _T_WCHAR
+#ifndef __WCHAR_T
+#ifndef _WCHAR_T_
+#ifndef _BSD_WCHAR_T_
+#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */
+#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */
+#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */
+#ifndef _WCHAR_T_DEFINED_
+#ifndef _WCHAR_T_DEFINED
+#ifndef _WCHAR_T_H
+#ifndef ___int_wchar_t_h
+#ifndef __INT_WCHAR_T_H
+#ifndef _GCC_WCHAR_T
+#define __wchar_t__ /* BeOS */
+#define __WCHAR_T__ /* Cray Unicos/Mk */
+#define _WCHAR_T
+#define _T_WCHAR_
+#define _T_WCHAR
+#define __WCHAR_T
+#define _WCHAR_T_
+#define _BSD_WCHAR_T_
+#define _WCHAR_T_DEFINED_
+#define _WCHAR_T_DEFINED
+#define _WCHAR_T_H
+#define ___int_wchar_t_h
+#define __INT_WCHAR_T_H
+#define _GCC_WCHAR_T
+#define _WCHAR_T_DECLARED
+
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other
+ symbols in the _FOO_T_ family, stays defined even after its
+ corresponding type is defined). If we define wchar_t, then we
+ must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if
+ we undef _WCHAR_T_, then we must also define rune_t, since
+ headers like runetype.h assume that if machine/ansi.h is included,
+ and _BSD_WCHAR_T_ is not defined, then rune_t is available.
+ machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of
+ the same type." */
+#ifdef _BSD_WCHAR_T_
+#undef _BSD_WCHAR_T_
+#ifdef _BSD_RUNE_T_
+#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE)
+typedef _BSD_RUNE_T_ rune_t;
+#define _BSD_WCHAR_T_DEFINED_
+#define _BSD_RUNE_T_DEFINED_ /* Darwin */
+#if defined (__FreeBSD__) && (__FreeBSD__ < 5)
+/* Why is this file so hard to maintain properly? In contrast to
+ the comment above regarding BSD/386 1.1, on FreeBSD for as long
+ as the symbol has existed, _BSD_RUNE_T_ must not stay defined or
+ redundant typedefs will occur when stdlib.h is included after this file. */
+#undef _BSD_RUNE_T_
+#endif
+#endif
+#endif
+#endif
+/* FreeBSD 5 can't be handled well using "traditional" logic above
+ since it no longer defines _BSD_RUNE_T_ yet still desires to export
+ rune_t in some cases... */
+#if defined (__FreeBSD__) && (__FreeBSD__ >= 5)
+#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE)
+#if __BSD_VISIBLE
+#ifndef _RUNE_T_DECLARED
+typedef __rune_t rune_t;
+#define _RUNE_T_DECLARED
+#endif
+#endif
+#endif
+#endif
+
+#ifndef __WCHAR_TYPE__
+#define __WCHAR_TYPE__ int
+#endif
+#ifndef __cplusplus
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif /* _WCHAR_T_DECLARED */
+#endif /* _BSD_RUNE_T_DEFINED_ */
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif /* __WCHAR_T__ */
+#endif /* __wchar_t__ */
+#undef __need_wchar_t
+#endif /* _STDDEF_H or __need_wchar_t. */
+
+#if defined (__need_wint_t)
+#ifndef _WINT_T
+#define _WINT_T
+
+#ifndef __WINT_TYPE__
+#define __WINT_TYPE__ unsigned int
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif
+
+/* In 4.3bsd-net2, leave these undefined to indicate that size_t, etc.
+ are already defined. */
+/* BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here. */
+/* NetBSD 5 requires the I386_ANSI_H and X86_64_ANSI_H checks here. */
+#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_)
+/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_
+ are probably typos and should be removed before 2.8 is released. */
+#ifdef _GCC_PTRDIFF_T_
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T_
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T_
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+/* The following ones are the real ones. */
+#ifdef _GCC_PTRDIFF_T
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* _ANSI_H_ || _MACHINE_ANSI_H_ || _X86_64_ANSI_H_ || _I386_ANSI_H_ */
+
+#endif /* __sys_stdtypes_h */
+
+/* A null pointer constant. */
+
+#if defined (_STDDEF_H) || defined (__need_NULL)
+#undef NULL /* in case <stdio.h> has defined it. */
+#ifdef __GNUG__
+#define NULL __null
+#else /* G++ */
+#ifndef __cplusplus
+#define NULL ((void *)0)
+#else /* C++ */
+#define NULL 0
+#endif /* C++ */
+#endif /* G++ */
+#endif /* NULL not defined and <stddef.h> or need NULL. */
+#undef __need_NULL
+
+#ifdef _STDDEF_H
+
+/* Offset of member MEMBER in a struct of type TYPE. */
+#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
+
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \
+ || (defined(__cplusplus) && __cplusplus >= 201103L)
+#ifndef _GCC_MAX_ALIGN_T
+#define _GCC_MAX_ALIGN_T
+/* Type whose alignment is supported in every context and is at least
+ as great as that of any standard type not using alignment
+ specifiers. */
+typedef struct {
+ long long __max_align_ll __attribute__((__aligned__(__alignof__(long long))));
+ long double __max_align_ld __attribute__((__aligned__(__alignof__(long double))));
+} max_align_t;
+#endif
+#endif /* C11 or C++11. */
+
+#endif /* _STDDEF_H was defined this time */
+
+#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__
+ || __need_XXX was not defined before */
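A short sketch of offsetof as defined above; because it expands to __builtin_offsetof, the result is an integer constant expression (struct and member names are hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    struct packet
    {
      char tag;
      size_t len;
      char payload[16];
    };

    int
    main (void)
    {
      printf ("payload starts at byte %zu\n",
              offsetof (struct packet, payload));
      return 0;
    }
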
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stdfix.h b/lib/gcc/arm-linux-androideabi/4.7/include/stdfix.h
new file mode 100644
index 0000000..3c3ec00
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stdfix.h
@@ -0,0 +1,204 @@
+/* Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO/IEC JTC1 SC22 WG14 N1169
+ * Date: 2006-04-04
+ * ISO/IEC TR 18037
+ * Programming languages - C - Extensions to support embedded processors
+ */
+
+#ifndef _STDFIX_H
+#define _STDFIX_H
+
+/* 7.18a.1 Introduction. */
+
+#undef fract
+#undef accum
+#undef sat
+#define fract _Fract
+#define accum _Accum
+#define sat _Sat
+
+/* 7.18a.3 Precision macros. */
+
+#undef SFRACT_FBIT
+#undef SFRACT_MIN
+#undef SFRACT_MAX
+#undef SFRACT_EPSILON
+#define SFRACT_FBIT __SFRACT_FBIT__
+#define SFRACT_MIN __SFRACT_MIN__
+#define SFRACT_MAX __SFRACT_MAX__
+#define SFRACT_EPSILON __SFRACT_EPSILON__
+
+#undef USFRACT_FBIT
+#undef USFRACT_MIN
+#undef USFRACT_MAX
+#undef USFRACT_EPSILON
+#define USFRACT_FBIT __USFRACT_FBIT__
+#define USFRACT_MIN __USFRACT_MIN__ /* GCC extension. */
+#define USFRACT_MAX __USFRACT_MAX__
+#define USFRACT_EPSILON __USFRACT_EPSILON__
+
+#undef FRACT_FBIT
+#undef FRACT_MIN
+#undef FRACT_MAX
+#undef FRACT_EPSILON
+#define FRACT_FBIT __FRACT_FBIT__
+#define FRACT_MIN __FRACT_MIN__
+#define FRACT_MAX __FRACT_MAX__
+#define FRACT_EPSILON __FRACT_EPSILON__
+
+#undef UFRACT_FBIT
+#undef UFRACT_MIN
+#undef UFRACT_MAX
+#undef UFRACT_EPSILON
+#define UFRACT_FBIT __UFRACT_FBIT__
+#define UFRACT_MIN __UFRACT_MIN__ /* GCC extension. */
+#define UFRACT_MAX __UFRACT_MAX__
+#define UFRACT_EPSILON __UFRACT_EPSILON__
+
+#undef LFRACT_FBIT
+#undef LFRACT_MIN
+#undef LFRACT_MAX
+#undef LFRACT_EPSILON
+#define LFRACT_FBIT __LFRACT_FBIT__
+#define LFRACT_MIN __LFRACT_MIN__
+#define LFRACT_MAX __LFRACT_MAX__
+#define LFRACT_EPSILON __LFRACT_EPSILON__
+
+#undef ULFRACT_FBIT
+#undef ULFRACT_MIN
+#undef ULFRACT_MAX
+#undef ULFRACT_EPSILON
+#define ULFRACT_FBIT __ULFRACT_FBIT__
+#define ULFRACT_MIN __ULFRACT_MIN__ /* GCC extension. */
+#define ULFRACT_MAX __ULFRACT_MAX__
+#define ULFRACT_EPSILON __ULFRACT_EPSILON__
+
+#undef LLFRACT_FBIT
+#undef LLFRACT_MIN
+#undef LLFRACT_MAX
+#undef LLFRACT_EPSILON
+#define LLFRACT_FBIT __LLFRACT_FBIT__ /* GCC extension. */
+#define LLFRACT_MIN __LLFRACT_MIN__ /* GCC extension. */
+#define LLFRACT_MAX __LLFRACT_MAX__ /* GCC extension. */
+#define LLFRACT_EPSILON __LLFRACT_EPSILON__ /* GCC extension. */
+
+#undef ULLFRACT_FBIT
+#undef ULLFRACT_MIN
+#undef ULLFRACT_MAX
+#undef ULLFRACT_EPSILON
+#define ULLFRACT_FBIT __ULLFRACT_FBIT__ /* GCC extension. */
+#define ULLFRACT_MIN __ULLFRACT_MIN__ /* GCC extension. */
+#define ULLFRACT_MAX __ULLFRACT_MAX__ /* GCC extension. */
+#define ULLFRACT_EPSILON __ULLFRACT_EPSILON__ /* GCC extension. */
+
+#undef SACCUM_FBIT
+#undef SACCUM_IBIT
+#undef SACCUM_MIN
+#undef SACCUM_MAX
+#undef SACCUM_EPSILON
+#define SACCUM_FBIT __SACCUM_FBIT__
+#define SACCUM_IBIT __SACCUM_IBIT__
+#define SACCUM_MIN __SACCUM_MIN__
+#define SACCUM_MAX __SACCUM_MAX__
+#define SACCUM_EPSILON __SACCUM_EPSILON__
+
+#undef USACCUM_FBIT
+#undef USACCUM_IBIT
+#undef USACCUM_MIN
+#undef USACCUM_MAX
+#undef USACCUM_EPSILON
+#define USACCUM_FBIT __USACCUM_FBIT__
+#define USACCUM_IBIT __USACCUM_IBIT__
+#define USACCUM_MIN __USACCUM_MIN__ /* GCC extension. */
+#define USACCUM_MAX __USACCUM_MAX__
+#define USACCUM_EPSILON __USACCUM_EPSILON__
+
+#undef ACCUM_FBIT
+#undef ACCUM_IBIT
+#undef ACCUM_MIN
+#undef ACCUM_MAX
+#undef ACCUM_EPSILON
+#define ACCUM_FBIT __ACCUM_FBIT__
+#define ACCUM_IBIT __ACCUM_IBIT__
+#define ACCUM_MIN __ACCUM_MIN__
+#define ACCUM_MAX __ACCUM_MAX__
+#define ACCUM_EPSILON __ACCUM_EPSILON__
+
+#undef UACCUM_FBIT
+#undef UACCUM_IBIT
+#undef UACCUM_MIN
+#undef UACCUM_MAX
+#undef UACCUM_EPSILON
+#define UACCUM_FBIT __UACCUM_FBIT__
+#define UACCUM_IBIT __UACCUM_IBIT__
+#define UACCUM_MIN __UACCUM_MIN__ /* GCC extension. */
+#define UACCUM_MAX __UACCUM_MAX__
+#define UACCUM_EPSILON __UACCUM_EPSILON__
+
+#undef LACCUM_FBIT
+#undef LACCUM_IBIT
+#undef LACCUM_MIN
+#undef LACCUM_MAX
+#undef LACCUM_EPSILON
+#define LACCUM_FBIT __LACCUM_FBIT__
+#define LACCUM_IBIT __LACCUM_IBIT__
+#define LACCUM_MIN __LACCUM_MIN__
+#define LACCUM_MAX __LACCUM_MAX__
+#define LACCUM_EPSILON __LACCUM_EPSILON__
+
+#undef ULACCUM_FBIT
+#undef ULACCUM_IBIT
+#undef ULACCUM_MIN
+#undef ULACCUM_MAX
+#undef ULACCUM_EPSILON
+#define ULACCUM_FBIT __ULACCUM_FBIT__
+#define ULACCUM_IBIT __ULACCUM_IBIT__
+#define ULACCUM_MIN __ULACCUM_MIN__ /* GCC extension. */
+#define ULACCUM_MAX __ULACCUM_MAX__
+#define ULACCUM_EPSILON __ULACCUM_EPSILON__
+
+#undef LLACCUM_FBIT
+#undef LLACCUM_IBIT
+#undef LLACCUM_MIN
+#undef LLACCUM_MAX
+#undef LLACCUM_EPSILON
+#define LLACCUM_FBIT __LLACCUM_FBIT__ /* GCC extension. */
+#define LLACCUM_IBIT __LLACCUM_IBIT__ /* GCC extension. */
+#define LLACCUM_MIN __LLACCUM_MIN__ /* GCC extension. */
+#define LLACCUM_MAX __LLACCUM_MAX__ /* GCC extension. */
+#define LLACCUM_EPSILON __LLACCUM_EPSILON__ /* GCC extension. */
+
+#undef ULLACCUM_FBIT
+#undef ULLACCUM_IBIT
+#undef ULLACCUM_MIN
+#undef ULLACCUM_MAX
+#undef ULLACCUM_EPSILON
+#define ULLACCUM_FBIT __ULLACCUM_FBIT__ /* GCC extension. */
+#define ULLACCUM_IBIT __ULLACCUM_IBIT__ /* GCC extension. */
+#define ULLACCUM_MIN __ULLACCUM_MIN__ /* GCC extension. */
+#define ULLACCUM_MAX __ULLACCUM_MAX__ /* GCC extension. */
+#define ULLACCUM_EPSILON __ULLACCUM_EPSILON__ /* GCC extension. */
+
+#endif /* _STDFIX_H */
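A hedged sketch of the fixed-point keywords defined above; it compiles only on targets where GCC actually implements the TR 18037 types, and the function name is hypothetical:

    #include <stdfix.h>

    /* With the `sat` qualifier, overflow clamps to FRACT_MAX
       instead of wrapping.  0.5r is a fract constant.  */
    sat fract
    add_half (sat fract x)
    {
      return x + 0.5r;
    }
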
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stdint-gcc.h b/lib/gcc/arm-linux-androideabi/4.7/include/stdint-gcc.h
new file mode 100644
index 0000000..22780a1
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stdint-gcc.h
@@ -0,0 +1,259 @@
+/* Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.18 Integer types <stdint.h>
+ */
+
+#ifndef _GCC_STDINT_H
+#define _GCC_STDINT_H
+
+/* 7.18.1.1 Exact-width integer types */
+
+#ifdef __INT8_TYPE__
+typedef __INT8_TYPE__ int8_t;
+#endif
+#ifdef __INT16_TYPE__
+typedef __INT16_TYPE__ int16_t;
+#endif
+#ifdef __INT32_TYPE__
+typedef __INT32_TYPE__ int32_t;
+#endif
+#ifdef __INT64_TYPE__
+typedef __INT64_TYPE__ int64_t;
+#endif
+#ifdef __UINT8_TYPE__
+typedef __UINT8_TYPE__ uint8_t;
+#endif
+#ifdef __UINT16_TYPE__
+typedef __UINT16_TYPE__ uint16_t;
+#endif
+#ifdef __UINT32_TYPE__
+typedef __UINT32_TYPE__ uint32_t;
+#endif
+#ifdef __UINT64_TYPE__
+typedef __UINT64_TYPE__ uint64_t;
+#endif
+
+/* 7.18.1.2 Minimum-width integer types */
+
+typedef __INT_LEAST8_TYPE__ int_least8_t;
+typedef __INT_LEAST16_TYPE__ int_least16_t;
+typedef __INT_LEAST32_TYPE__ int_least32_t;
+typedef __INT_LEAST64_TYPE__ int_least64_t;
+typedef __UINT_LEAST8_TYPE__ uint_least8_t;
+typedef __UINT_LEAST16_TYPE__ uint_least16_t;
+typedef __UINT_LEAST32_TYPE__ uint_least32_t;
+typedef __UINT_LEAST64_TYPE__ uint_least64_t;
+
+/* 7.18.1.3 Fastest minimum-width integer types */
+
+typedef __INT_FAST8_TYPE__ int_fast8_t;
+typedef __INT_FAST16_TYPE__ int_fast16_t;
+typedef __INT_FAST32_TYPE__ int_fast32_t;
+typedef __INT_FAST64_TYPE__ int_fast64_t;
+typedef __UINT_FAST8_TYPE__ uint_fast8_t;
+typedef __UINT_FAST16_TYPE__ uint_fast16_t;
+typedef __UINT_FAST32_TYPE__ uint_fast32_t;
+typedef __UINT_FAST64_TYPE__ uint_fast64_t;
+
+/* 7.18.1.4 Integer types capable of holding object pointers */
+
+#ifdef __INTPTR_TYPE__
+typedef __INTPTR_TYPE__ intptr_t;
+#endif
+#ifdef __UINTPTR_TYPE__
+typedef __UINTPTR_TYPE__ uintptr_t;
+#endif
+
+/* 7.18.1.5 Greatest-width integer types */
+
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+#if !defined __cplusplus || defined __STDC_LIMIT_MACROS
+
+/* 7.18.2 Limits of specified-width integer types */
+
+#ifdef __INT8_MAX__
+# undef INT8_MAX
+# define INT8_MAX __INT8_MAX__
+# undef INT8_MIN
+# define INT8_MIN (-INT8_MAX - 1)
+#endif
+#ifdef __UINT8_MAX__
+# undef UINT8_MAX
+# define UINT8_MAX __UINT8_MAX__
+#endif
+#ifdef __INT16_MAX__
+# undef INT16_MAX
+# define INT16_MAX __INT16_MAX__
+# undef INT16_MIN
+# define INT16_MIN (-INT16_MAX - 1)
+#endif
+#ifdef __UINT16_MAX__
+# undef UINT16_MAX
+# define UINT16_MAX __UINT16_MAX__
+#endif
+#ifdef __INT32_MAX__
+# undef INT32_MAX
+# define INT32_MAX __INT32_MAX__
+# undef INT32_MIN
+# define INT32_MIN (-INT32_MAX - 1)
+#endif
+#ifdef __UINT32_MAX__
+# undef UINT32_MAX
+# define UINT32_MAX __UINT32_MAX__
+#endif
+#ifdef __INT64_MAX__
+# undef INT64_MAX
+# define INT64_MAX __INT64_MAX__
+# undef INT64_MIN
+# define INT64_MIN (-INT64_MAX - 1)
+#endif
+#ifdef __UINT64_MAX__
+# undef UINT64_MAX
+# define UINT64_MAX __UINT64_MAX__
+#endif
+
+#undef INT_LEAST8_MAX
+#define INT_LEAST8_MAX __INT_LEAST8_MAX__
+#undef INT_LEAST8_MIN
+#define INT_LEAST8_MIN (-INT_LEAST8_MAX - 1)
+#undef UINT_LEAST8_MAX
+#define UINT_LEAST8_MAX __UINT_LEAST8_MAX__
+#undef INT_LEAST16_MAX
+#define INT_LEAST16_MAX __INT_LEAST16_MAX__
+#undef INT_LEAST16_MIN
+#define INT_LEAST16_MIN (-INT_LEAST16_MAX - 1)
+#undef UINT_LEAST16_MAX
+#define UINT_LEAST16_MAX __UINT_LEAST16_MAX__
+#undef INT_LEAST32_MAX
+#define INT_LEAST32_MAX __INT_LEAST32_MAX__
+#undef INT_LEAST32_MIN
+#define INT_LEAST32_MIN (-INT_LEAST32_MAX - 1)
+#undef UINT_LEAST32_MAX
+#define UINT_LEAST32_MAX __UINT_LEAST32_MAX__
+#undef INT_LEAST64_MAX
+#define INT_LEAST64_MAX __INT_LEAST64_MAX__
+#undef INT_LEAST64_MIN
+#define INT_LEAST64_MIN (-INT_LEAST64_MAX - 1)
+#undef UINT_LEAST64_MAX
+#define UINT_LEAST64_MAX __UINT_LEAST64_MAX__
+
+#undef INT_FAST8_MAX
+#define INT_FAST8_MAX __INT_FAST8_MAX__
+#undef INT_FAST8_MIN
+#define INT_FAST8_MIN (-INT_FAST8_MAX - 1)
+#undef UINT_FAST8_MAX
+#define UINT_FAST8_MAX __UINT_FAST8_MAX__
+#undef INT_FAST16_MAX
+#define INT_FAST16_MAX __INT_FAST16_MAX__
+#undef INT_FAST16_MIN
+#define INT_FAST16_MIN (-INT_FAST16_MAX - 1)
+#undef UINT_FAST16_MAX
+#define UINT_FAST16_MAX __UINT_FAST16_MAX__
+#undef INT_FAST32_MAX
+#define INT_FAST32_MAX __INT_FAST32_MAX__
+#undef INT_FAST32_MIN
+#define INT_FAST32_MIN (-INT_FAST32_MAX - 1)
+#undef UINT_FAST32_MAX
+#define UINT_FAST32_MAX __UINT_FAST32_MAX__
+#undef INT_FAST64_MAX
+#define INT_FAST64_MAX __INT_FAST64_MAX__
+#undef INT_FAST64_MIN
+#define INT_FAST64_MIN (-INT_FAST64_MAX - 1)
+#undef UINT_FAST64_MAX
+#define UINT_FAST64_MAX __UINT_FAST64_MAX__
+
+#ifdef __INTPTR_MAX__
+# undef INTPTR_MAX
+# define INTPTR_MAX __INTPTR_MAX__
+# undef INTPTR_MIN
+# define INTPTR_MIN (-INTPTR_MAX - 1)
+#endif
+#ifdef __UINTPTR_MAX__
+# undef UINTPTR_MAX
+# define UINTPTR_MAX __UINTPTR_MAX__
+#endif
+
+#undef INTMAX_MAX
+#define INTMAX_MAX __INTMAX_MAX__
+#undef INTMAX_MIN
+#define INTMAX_MIN (-INTMAX_MAX - 1)
+#undef UINTMAX_MAX
+#define UINTMAX_MAX __UINTMAX_MAX__
+
+/* 7.18.3 Limits of other integer types */
+
+#undef PTRDIFF_MAX
+#define PTRDIFF_MAX __PTRDIFF_MAX__
+#undef PTRDIFF_MIN
+#define PTRDIFF_MIN (-PTRDIFF_MAX - 1)
+
+#undef SIG_ATOMIC_MAX
+#define SIG_ATOMIC_MAX __SIG_ATOMIC_MAX__
+#undef SIG_ATOMIC_MIN
+#define SIG_ATOMIC_MIN __SIG_ATOMIC_MIN__
+
+#undef SIZE_MAX
+#define SIZE_MAX __SIZE_MAX__
+
+#undef WCHAR_MAX
+#define WCHAR_MAX __WCHAR_MAX__
+#undef WCHAR_MIN
+#define WCHAR_MIN __WCHAR_MIN__
+
+#undef WINT_MAX
+#define WINT_MAX __WINT_MAX__
+#undef WINT_MIN
+#define WINT_MIN __WINT_MIN__
+
+#endif /* !defined __cplusplus || defined __STDC_LIMIT_MACROS */
+
+#if !defined __cplusplus || defined __STDC_CONSTANT_MACROS
+
+#undef INT8_C
+#define INT8_C(c) __INT8_C(c)
+#undef INT16_C
+#define INT16_C(c) __INT16_C(c)
+#undef INT32_C
+#define INT32_C(c) __INT32_C(c)
+#undef INT64_C
+#define INT64_C(c) __INT64_C(c)
+#undef UINT8_C
+#define UINT8_C(c) __UINT8_C(c)
+#undef UINT16_C
+#define UINT16_C(c) __UINT16_C(c)
+#undef UINT32_C
+#define UINT32_C(c) __UINT32_C(c)
+#undef UINT64_C
+#define UINT64_C(c) __UINT64_C(c)
+#undef INTMAX_C
+#define INTMAX_C(c) __INTMAX_C(c)
+#undef UINTMAX_C
+#define UINTMAX_C(c) __UINTMAX_C(c)
+
+#endif /* !defined __cplusplus || defined __STDC_CONSTANT_MACROS */
+
+#endif /* _GCC_STDINT_H */
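A small sketch of the exact-width types and constant macros above, using one step of the well-known FNV-1a hash (hypothetical function name):

    #include <stdint.h>

    /* UINT32_C guarantees the literal is at least uint_least32_t.  */
    uint32_t
    fnv1a_step (uint32_t h, uint8_t byte)
    {
      h ^= byte;
      h *= UINT32_C (16777619);  /* FNV-1a 32-bit prime */
      return h;
    }
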
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stdint.h b/lib/gcc/arm-linux-androideabi/4.7/include/stdint.h
new file mode 100644
index 0000000..e45f819
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stdint.h
@@ -0,0 +1,8 @@
+#ifndef _GCC_WRAP_STDINT_H
+#if __STDC_HOSTED__
+# include_next <stdint.h>
+#else
+# include "stdint-gcc.h"
+#endif
+#define _GCC_WRAP_STDINT_H
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/stdnoreturn.h b/lib/gcc/arm-linux-androideabi/4.7/include/stdnoreturn.h
new file mode 100644
index 0000000..3efbd8a
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/stdnoreturn.h
@@ -0,0 +1,35 @@
+/* Copyright (C) 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C1X: 7.23 _Noreturn <stdnoreturn.h>. */
+
+#ifndef _STDNORETURN_H
+#define _STDNORETURN_H
+
+#ifndef __cplusplus
+
+#define noreturn _Noreturn
+
+#endif
+
+#endif /* stdnoreturn.h */
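A minimal sketch of the macro in use (hypothetical function name):

    #include <stdnoreturn.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* `noreturn` expands to _Noreturn: the compiler may assume calls
       never return and warns if a return path exists.  */
    noreturn void
    die (const char *msg)
    {
      fprintf (stderr, "fatal: %s\n", msg);
      exit (1);
    }
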
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/unwind-arm-common.h b/lib/gcc/arm-linux-androideabi/4.7/include/unwind-arm-common.h
new file mode 100644
index 0000000..9587270
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/unwind-arm-common.h
@@ -0,0 +1,251 @@
+/* Header file for the ARM EABI and C6X unwinders
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011
+ Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Language-independent unwinder header public defines. This contains both
+ ABI defined objects, and GNU support routines. */
+
+#ifndef UNWIND_ARM_COMMON_H
+#define UNWIND_ARM_COMMON_H
+
+#define __ARM_EABI_UNWINDER__ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ typedef unsigned _Unwind_Word __attribute__((__mode__(__word__)));
+ typedef signed _Unwind_Sword __attribute__((__mode__(__word__)));
+ typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__)));
+ typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__)));
+ typedef _Unwind_Word _uw;
+ typedef unsigned _uw64 __attribute__((mode(__DI__)));
+ typedef unsigned _uw16 __attribute__((mode(__HI__)));
+ typedef unsigned _uw8 __attribute__((mode(__QI__)));
+
+ typedef enum
+ {
+ _URC_OK = 0, /* operation completed successfully */
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8,
+ _URC_FAILURE = 9 /* unspecified failure of some kind */
+ }
+ _Unwind_Reason_Code;
+
+ typedef enum
+ {
+ _US_VIRTUAL_UNWIND_FRAME = 0,
+ _US_UNWIND_FRAME_STARTING = 1,
+ _US_UNWIND_FRAME_RESUME = 2,
+ _US_ACTION_MASK = 3,
+ _US_FORCE_UNWIND = 8,
+ _US_END_OF_STACK = 16
+ }
+ _Unwind_State;
+
+ /* Provided only for compatibility with existing code. */
+ typedef int _Unwind_Action;
+#define _UA_SEARCH_PHASE 1
+#define _UA_CLEANUP_PHASE 2
+#define _UA_HANDLER_FRAME 4
+#define _UA_FORCE_UNWIND 8
+#define _UA_END_OF_STACK 16
+#define _URC_NO_REASON _URC_OK
+
+ typedef struct _Unwind_Control_Block _Unwind_Control_Block;
+ typedef struct _Unwind_Context _Unwind_Context;
+ typedef _uw _Unwind_EHT_Header;
+
+
+  /* UCB (Unwind Control Block): */
+
+ struct _Unwind_Control_Block
+ {
+ char exception_class[8];
+ void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);
+ /* Unwinder cache, private fields for the unwinder's use */
+ struct
+ {
+ _uw reserved1; /* Forced unwind stop fn, 0 if not forced */
+ _uw reserved2; /* Personality routine address */
+ _uw reserved3; /* Saved callsite address */
+ _uw reserved4; /* Forced unwind stop arg */
+ _uw reserved5;
+ }
+ unwinder_cache;
+ /* Propagation barrier cache (valid after phase 1): */
+ struct
+ {
+ _uw sp;
+ _uw bitpattern[5];
+ }
+ barrier_cache;
+ /* Cleanup cache (preserved over cleanup): */
+ struct
+ {
+ _uw bitpattern[4];
+ }
+ cleanup_cache;
+ /* Pr cache (for pr's benefit): */
+ struct
+ {
+ _uw fnstart; /* function start address */
+ _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */
+ _uw additional; /* additional data */
+ _uw reserved1;
+ }
+ pr_cache;
+ long long int :0; /* Force alignment to 8-byte boundary */
+ };
+
+  /* Virtual Register Set.  */
+
+ typedef enum
+ {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_FPA = 2, /* fpa */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+ }
+ _Unwind_VRS_RegClass;
+
+ typedef enum
+ {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_FPAX = 2,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+ }
+ _Unwind_VRS_DataRepresentation;
+
+ typedef enum
+ {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+ }
+ _Unwind_VRS_Result;
+
+ /* Frame unwinding state. */
+ typedef struct
+ {
+ /* The current word (bytes packed msb first). */
+ _uw data;
+ /* Pointer to the next word of data. */
+ _uw *next;
+ /* The number of bytes left in this word. */
+ _uw8 bytes_left;
+ /* The number of words pointed to by ptr. */
+ _uw8 words_left;
+ }
+ __gnu_unwind_state;
+
+ typedef _Unwind_Reason_Code (*personality_routine) (_Unwind_State,
+ _Unwind_Control_Block *, _Unwind_Context *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation);
+
+
+ /* Support functions for the PR. */
+#define _Unwind_Exception _Unwind_Control_Block
+ typedef char _Unwind_Exception_Class[8];
+
+ void * _Unwind_GetLanguageSpecificData (_Unwind_Context *);
+ _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *);
+
+ _Unwind_Ptr _Unwind_GetDataRelBase (_Unwind_Context *);
+ /* This should never be used. */
+ _Unwind_Ptr _Unwind_GetTextRelBase (_Unwind_Context *);
+
+ /* Interface functions: */
+ _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp);
+ void __attribute__((noreturn)) _Unwind_Resume(_Unwind_Control_Block *ucbp);
+ _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (_Unwind_Control_Block *ucbp);
+
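As a usage sketch: a language runtime raises an exception by filling in a UCB and calling _Unwind_RaiseException; the call returns only if no handler was found. The class string, helper names, and error handling below are placeholders, not this toolchain's conventions.

    #include <stdlib.h>
    #include <string.h>
    #include <unwind.h>

    static void
    cleanup_ucb (_Unwind_Reason_Code code, _Unwind_Control_Block *ucbp)
    {
      free (ucbp);   /* Runs if a foreign runtime disposes of the exception. */
    }

    void
    raise_example (void)
    {
      _Unwind_Control_Block *ucbp = calloc (1, sizeof *ucbp);
      if (!ucbp)
        abort ();
      memcpy (ucbp->exception_class, "EXAMPLE", 8);  /* 7 chars + NUL. */
      ucbp->exception_cleanup = cleanup_ucb;
      _Unwind_RaiseException (ucbp);
      abort ();   /* Reached only when no handler caught it. */
    }
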
+ typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)
+ (int, _Unwind_Action, _Unwind_Exception_Class,
+ _Unwind_Control_Block *, struct _Unwind_Context *, void *);
+ _Unwind_Reason_Code _Unwind_ForcedUnwind (_Unwind_Control_Block *,
+ _Unwind_Stop_Fn, void *);
+ /* @@@ Use unwind data to perform a stack backtrace. The trace callback
+ is called for every stack frame in the call chain, but no cleanup
+ actions are performed. */
+ typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (_Unwind_Context *, void *);
+ _Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn,
+ void*);
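
_Unwind_Backtrace is what backtrace(3)-style facilities build on. A minimal sketch, assuming the _Unwind_GetIP macro from the ARM unwind.h added later in this commit; the function names are invented:

    #include <stdio.h>
    #include <unwind.h>

    static _Unwind_Reason_Code
    trace_one_frame (_Unwind_Context *context, void *arg)
    {
      int *depth = arg;
      printf ("#%d pc=%#lx\n", (*depth)++,
              (unsigned long) _Unwind_GetIP (context));
      return _URC_NO_REASON;   /* Keep walking toward the stack base. */
    }

    void
    dump_backtrace (void)
    {
      int depth = 0;
      _Unwind_Backtrace (trace_one_frame, &depth);
    }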
+
+ _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *);
+ void _Unwind_Complete(_Unwind_Control_Block *ucbp);
+ void _Unwind_DeleteException (_Unwind_Exception *);
+
+ _Unwind_Reason_Code __gnu_unwind_frame (_Unwind_Control_Block *,
+ _Unwind_Context *);
+ _Unwind_Reason_Code __gnu_unwind_execute (_Unwind_Context *,
+ __gnu_unwind_state *);
+
+ static inline _Unwind_Word
+ _Unwind_GetGR (_Unwind_Context *context, int regno)
+ {
+ _uw val;
+ _Unwind_VRS_Get (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ return val;
+ }
+
+#define _Unwind_GetIPInfo(context, ip_before_insn) \
+ (*ip_before_insn = 0, _Unwind_GetIP (context))
+
+ static inline void
+ _Unwind_SetGR (_Unwind_Context *context, int regno, _Unwind_Word val)
+ {
+ _Unwind_VRS_Set (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ }
+
+ _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *);
+ void * _Unwind_GetLanguageSpecificData (_Unwind_Context *);
+
+/* leb128 type numbers have a potentially unlimited size.  The
+   following definitions of _sleb128_t and _uleb128_t provide data
+   types that are efficient yet large enough to hold the leb128
+   values used in the unwind code. */
+typedef long _sleb128_t;
+typedef unsigned long _uleb128_t;
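
For example, a ULEB128 value is decoded into _uleb128_t seven bits at a time, which is why the typedef merely has to be "large enough" rather than arbitrary-precision. A sketch with an invented helper name:

    /* Sketch: decode one ULEB128 value, advancing *p past it. */
    static _uleb128_t
    read_uleb128_sketch (const unsigned char **p)
    {
      _uleb128_t result = 0;
      unsigned int shift = 0;
      unsigned char byte;

      do
        {
          byte = *(*p)++;
          result |= (_uleb128_t) (byte & 0x7f) << shift;
          shift += 7;
        }
      while (byte & 0x80);   /* High bit set: more bytes follow. */
      return result;
    }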
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* defined UNWIND_ARM_COMMON_H */
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/unwind.h b/lib/gcc/arm-linux-androideabi/4.7/include/unwind.h
new file mode 100644
index 0000000..4300c8e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/unwind.h
@@ -0,0 +1,83 @@
+/* Header file for the ARM EABI unwinder
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011
+ Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Language-independent unwinder header public defines.  This contains
+   both ABI-defined objects and GNU support routines. */
+
+#ifndef UNWIND_ARM_H
+#define UNWIND_ARM_H
+
+#include "unwind-arm-common.h"
+
+#define UNWIND_STACK_REG 13
+/* Use IP as a scratch register within the personality routine. */
+#define UNWIND_POINTER_REG 12
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /* Decode an R_ARM_TARGET2 relocation. */
+ static inline _Unwind_Word
+ _Unwind_decode_typeinfo_ptr (_Unwind_Word base, _Unwind_Word ptr)
+ {
+ _Unwind_Word tmp;
+
+ tmp = *(_Unwind_Word *) ptr;
+ /* Zero values are always NULL. */
+ if (!tmp)
+ return 0;
+
+#if (defined(linux) && !defined(__uClinux__)) || defined(__NetBSD__)
+ /* PC-relative indirect. */
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel | DW_EH_PE_indirect)
+ tmp += ptr;
+ tmp = *(_Unwind_Word *) tmp;
+#elif defined(__symbian__) || defined(__uClinux__)
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_absptr)
+ /* Absolute pointer. Nothing more to do. */
+#else
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel)
+ /* PC-relative pointer. */
+ tmp += ptr;
+#endif
+ return tmp;
+ }
+
+ /* Not implemented for this unwinder: always report failure. */
+ static inline _Unwind_Reason_Code
+ __gnu_unwind_24bit (_Unwind_Context * context, _uw data, int compact)
+ {
+ return _URC_FAILURE;
+ }
+ /* Return the address of the instruction, not the actual IP value
+    (i.e. with the Thumb state bit masked off). */
+#define _Unwind_GetIP(context) \
+ (_Unwind_GetGR (context, 15) & ~(_Unwind_Word)1)
+
+#define _Unwind_SetIP(context, val) \
+ _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1))
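
Together with _Unwind_SetGR above, these macros are what a personality routine uses to hand control to a landing pad. A sketch of that final step; the function and variable names are invented, while _URC_INSTALL_CONTEXT is the ABI-defined "resume at the new context" result:

    /* Hypothetical last step of a personality routine: pass the UCB to
       the landing pad in r0 and redirect execution there.  _Unwind_SetIP
       preserves the current Thumb/ARM state bit. */
    static _Unwind_Reason_Code
    install_landing_pad (_Unwind_Context *context,
                         _Unwind_Control_Block *ucbp,
                         _Unwind_Word landing_pad)
    {
      _Unwind_SetGR (context, 0, (_Unwind_Word) ucbp);
      _Unwind_SetIP (context, landing_pad);
      return _URC_INSTALL_CONTEXT;
    }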
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* defined UNWIND_ARM_H */
diff --git a/lib/gcc/arm-linux-androideabi/4.7/include/varargs.h b/lib/gcc/arm-linux-androideabi/4.7/include/varargs.h
new file mode 100644
index 0000000..4b9803e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/include/varargs.h
@@ -0,0 +1,7 @@
+#ifndef _VARARGS_H
+#define _VARARGS_H
+
+#error "GCC no longer implements <varargs.h>."
+#error "Revise your code to use <stdarg.h>."
+
+#endif
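
For code that trips over this #error, the fix is mechanical: give the function at least one named parameter before the "..." and use the <stdarg.h> macros. A minimal sketch:

    #include <stdarg.h>

    /* Sum COUNT ints passed variadically, stdarg-style. */
    int
    sum_ints (int count, ...)
    {
      va_list ap;
      int total = 0;

      va_start (ap, count);
      while (count-- > 0)
        total += va_arg (ap, int);
      va_end (ap);
      return total;
    }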
diff --git a/lib/gcc/arm-linux-androideabi/4.7/libgcc.a b/lib/gcc/arm-linux-androideabi/4.7/libgcc.a
new file mode 100644
index 0000000..880aef5
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/libgcov.a b/lib/gcc/arm-linux-androideabi/4.7/libgcov.a
new file mode 100644
index 0000000..1a154ee
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbegin.o
new file mode 100644
index 0000000..0887c30
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginS.o
new file mode 100644
index 0000000..965e303
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginT.o
new file mode 100644
index 0000000..0887c30
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/thumb/crtend.o b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtend.o
new file mode 100644
index 0000000..a0ca4af
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/thumb/crtendS.o b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtendS.o
new file mode 100644
index 0000000..a0ca4af
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/thumb/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/thumb/libgcc.a b/lib/gcc/arm-linux-androideabi/4.7/thumb/libgcc.a
new file mode 100644
index 0000000..56e74b4
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/thumb/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.7/thumb/libgcov.a b/lib/gcc/arm-linux-androideabi/4.7/thumb/libgcov.a
new file mode 100644
index 0000000..a080e24
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.7/thumb/libgcov.a
Binary files differ
diff --git a/lib/libarm-linux-android-sim.a b/lib/libarm-linux-android-sim.a
new file mode 100644
index 0000000..ab17cb2
--- /dev/null
+++ b/lib/libarm-linux-android-sim.a
Binary files differ
diff --git a/lib/x86_64/libiberty.a b/lib/x86_64/libiberty.a
new file mode 100644
index 0000000..d31569a
--- /dev/null
+++ b/lib/x86_64/libiberty.a
Binary files differ
diff --git a/lib32/Android.mk b/lib32/Android.mk
new file mode 100644
index 0000000..ae50302
--- /dev/null
+++ b/lib32/Android.mk
@@ -0,0 +1,24 @@
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+LOCAL_PREBUILT_LIBS := \
+ libbfd.a \
+ libiberty.a \
+ libintl.a
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_HOST_PREBUILT)
diff --git a/lib32/libbfd.a b/lib32/libbfd.a
new file mode 100644
index 0000000..2b15215
--- /dev/null
+++ b/lib32/libbfd.a
Binary files differ
diff --git a/lib32/libbfd.la b/lib32/libbfd.la
new file mode 100755
index 0000000..49c7d3b
--- /dev/null
+++ b/lib32/libbfd.la
@@ -0,0 +1,41 @@
+# libbfd.la - a libtool library file
+# Generated by libtool (GNU libtool 1.3134 2009-11-29) 2.2.7a
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname=''
+
+# Names of this library.
+library_names=''
+
+# The name of the static archive.
+old_library='libbfd.a'
+
+# Linker flags that can not go in dependency_libs.
+inherited_linker_flags=' '
+
+# Libraries that this one depends upon.
+dependency_libs=' -lz'
+
+# Names of additional weak libraries provided by this library
+weak_library_names=''
+
+# Version information for libbfd.
+current=0
+age=0
+revision=0
+
+# Is this an already installed library?
+installed=yes
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=no
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir='/Volumes/android/toolchain-build-4.7/prefix/x86_64-apple-darwin/arm-linux-androideabi/lib'
diff --git a/lib32/libiberty.a b/lib32/libiberty.a
new file mode 100644
index 0000000..4f313f1
--- /dev/null
+++ b/lib32/libiberty.a
Binary files differ
diff --git a/lib32/libintl.a b/lib32/libintl.a
new file mode 100644
index 0000000..41c4fe1
--- /dev/null
+++ b/lib32/libintl.a
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/cc1 b/libexec/gcc/arm-linux-androideabi/4.7/cc1
new file mode 100755
index 0000000..f05b4f2
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/cc1
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/cc1plus b/libexec/gcc/arm-linux-androideabi/4.7/cc1plus
new file mode 100755
index 0000000..a31311a
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/cc1plus
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/collect2 b/libexec/gcc/arm-linux-androideabi/4.7/collect2
new file mode 100755
index 0000000..6ca334f
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/collect2
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.0.so b/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.0.so
new file mode 100755
index 0000000..9d8dae8
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.0.so
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.la b/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.la
new file mode 100755
index 0000000..00692a8
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.la
@@ -0,0 +1,41 @@
+# liblto_plugin.la - a libtool library file
+# Generated by libtool (GNU libtool 1.3134 2009-11-29) 2.2.7a
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='liblto_plugin.0.so'
+
+# Names of this library.
+library_names='liblto_plugin.0.so liblto_plugin.so'
+
+# The name of the static archive.
+old_library=''
+
+# Linker flags that can not go in dependency_libs.
+inherited_linker_flags=' '
+
+# Libraries that this one depends upon.
+dependency_libs=''
+
+# Names of additional weak libraries provided by this library
+weak_library_names=''
+
+# Version information for liblto_plugin.
+current=0
+age=0
+revision=0
+
+# Is this an already installed library?
+installed=yes
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=yes
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir='/Volumes/android/toolchain-build-4.7/prefix/libexec/gcc/arm-linux-androideabi/4.7'
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.so b/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.so
new file mode 100755
index 0000000..9d8dae8
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/liblto_plugin.so
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/lto-wrapper b/libexec/gcc/arm-linux-androideabi/4.7/lto-wrapper
new file mode 100755
index 0000000..e60058a
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/lto-wrapper
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.7/lto1 b/libexec/gcc/arm-linux-androideabi/4.7/lto1
new file mode 100755
index 0000000..8f733fa
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.7/lto1
Binary files differ
diff --git a/toolchain.mk b/toolchain.mk
new file mode 100644
index 0000000..e0bba43
--- /dev/null
+++ b/toolchain.mk
@@ -0,0 +1,17 @@
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(call all-makefiles-under, $(LOCAL_PATH))