author    Rong Xu <xur@google.com>    2014-09-07 18:04:58 +0000
committer Android Git Automerger <android-git-automerger@android.com>    2014-09-07 18:04:58 +0000
commit    62f565dbb15fca160cc060bbc28e00f43b0007ef (patch)
tree      f0d050eef2ddd82bf13978b176909a4de1ecac37
parent    140b85928b8c4b929047f6653ee88235d8d61bd8 (diff)
parent    90135de03f0a5e1e430d212d21a33317e31b005a (diff)
download  arm-linux-androideabi-4.9-62f565dbb15fca160cc060bbc28e00f43b0007ef.tar.gz
am 90135de0: [linux-x86] Add gcc-4.9 prebuilts for ARM. The gcc-4.9 source is from the google/gcc-4_9 branch.
* commit '90135de03f0a5e1e430d212d21a33317e31b005a': [linux-x86] Add gcc-4.9 prebuilts for ARM. The gcc-4.9 source is from the google/gcc-4_9 branch.
-rw-r--r--.Android.mk.un~bin0 -> 627 bytes
-rw-r--r--.toolchain.mk.un~bin0 -> 627 bytes
-rw-r--r--Android.mk16
l---------arm-linux-androideabi/bin/ar1
l---------arm-linux-androideabi/bin/as1
l---------arm-linux-androideabi/bin/ld1
l---------arm-linux-androideabi/bin/ld.bfd1
l---------arm-linux-androideabi/bin/ld.gold1
l---------arm-linux-androideabi/bin/nm1
l---------arm-linux-androideabi/bin/objcopy1
l---------arm-linux-androideabi/bin/objdump1
l---------arm-linux-androideabi/bin/ranlib1
l---------arm-linux-androideabi/bin/strip1
-rw-r--r--arm-linux-androideabi/lib/armv7-a/hard/libatomic.abin0 -> 186930 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/hard/libgomp.abin0 -> 465396 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/hard/libgomp.spec3
-rw-r--r--arm-linux-androideabi/lib/armv7-a/libatomic.abin0 -> 186682 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/libgomp.abin0 -> 465272 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/libgomp.spec3
-rw-r--r--arm-linux-androideabi/lib/armv7-a/thumb/hard/libatomic.abin0 -> 185726 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.abin0 -> 459000 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.spec3
-rw-r--r--arm-linux-androideabi/lib/armv7-a/thumb/libatomic.abin0 -> 185366 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/thumb/libgomp.abin0 -> 458916 bytes
-rw-r--r--arm-linux-androideabi/lib/armv7-a/thumb/libgomp.spec3
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x243
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn240
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc245
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd242
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc245
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw245
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn242
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr162
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs231
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc231
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw230
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu163
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw245
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x243
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn240
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc245
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd242
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc245
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw245
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn242
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr162
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs231
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc231
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw230
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu163
-rw-r--r--arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw245
-rw-r--r--arm-linux-androideabi/lib/libatomic.abin0 -> 252042 bytes
-rw-r--r--arm-linux-androideabi/lib/libgomp.abin0 -> 467524 bytes
-rw-r--r--arm-linux-androideabi/lib/libgomp.spec3
-rw-r--r--arm-linux-androideabi/lib/thumb/libatomic.abin0 -> 253630 bytes
-rw-r--r--arm-linux-androideabi/lib/thumb/libgomp.abin0 -> 459752 bytes
-rw-r--r--arm-linux-androideabi/lib/thumb/libgomp.spec3
-rwxr-xr-xbin/arm-linux-androideabi-addr2linebin0 -> 707960 bytes
-rwxr-xr-xbin/arm-linux-androideabi-arbin0 -> 736160 bytes
-rwxr-xr-xbin/arm-linux-androideabi-asbin0 -> 1293152 bytes
l---------bin/arm-linux-androideabi-c++1
-rwxr-xr-xbin/arm-linux-androideabi-c++filtbin0 -> 705880 bytes
-rwxr-xr-xbin/arm-linux-androideabi-cppbin0 -> 780808 bytes
-rwxr-xr-xbin/arm-linux-androideabi-dwpbin0 -> 2712360 bytes
-rwxr-xr-xbin/arm-linux-androideabi-elfeditbin0 -> 27944 bytes
-rwxr-xr-xbin/arm-linux-androideabi-g++bin0 -> 781160 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gccbin0 -> 780808 bytes
l---------bin/arm-linux-androideabi-gcc-4.91
-rwxr-xr-xbin/arm-linux-androideabi-gcc-4.9.x-googlebin0 -> 780808 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gcc-arbin0 -> 25440 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gcc-nmbin0 -> 25408 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gcc-ranlibbin0 -> 25440 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gcovbin0 -> 422024 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gcov-toolbin0 -> 450696 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gdbbin0 -> 4211256 bytes
-rwxr-xr-xbin/arm-linux-androideabi-gprofbin0 -> 776728 bytes
l---------bin/arm-linux-androideabi-ld1
-rwxr-xr-xbin/arm-linux-androideabi-ld.bfdbin0 -> 1190464 bytes
-rwxr-xr-xbin/arm-linux-androideabi-ld.goldbin0 -> 3868056 bytes
-rwxr-xr-xbin/arm-linux-androideabi-nmbin0 -> 719032 bytes
-rwxr-xr-xbin/arm-linux-androideabi-objcopybin0 -> 892664 bytes
-rwxr-xr-xbin/arm-linux-androideabi-objdumpbin0 -> 1127640 bytes
-rwxr-xr-xbin/arm-linux-androideabi-ranlibbin0 -> 736192 bytes
-rwxr-xr-xbin/arm-linux-androideabi-readelfbin0 -> 416616 bytes
-rwxr-xr-xbin/arm-linux-androideabi-sizebin0 -> 708824 bytes
-rwxr-xr-xbin/arm-linux-androideabi-stringsbin0 -> 707736 bytes
-rwxr-xr-xbin/arm-linux-androideabi-stripbin0 -> 892696 bytes
-rw-r--r--include/gdb/jit-reader.h346
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbegin.obin0 -> 2584 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginS.obin0 -> 2800 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginT.obin0 -> 2584 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtend.obin0 -> 1101 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtendS.obin0 -> 1101 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbegin.obin0 -> 2588 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginS.obin0 -> 2804 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginT.obin0 -> 2588 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtend.obin0 -> 1105 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtendS.obin0 -> 1105 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcc.abin0 -> 6358266 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcov.abin0 -> 319028 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcc.abin0 -> 6355462 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcov.abin0 -> 318964 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbegin.obin0 -> 2460 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginS.obin0 -> 2668 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginT.obin0 -> 2460 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtend.obin0 -> 1101 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtendS.obin0 -> 1101 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbegin.obin0 -> 2464 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginS.obin0 -> 2672 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginT.obin0 -> 2464 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtend.obin0 -> 1105 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtendS.obin0 -> 1105 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcc.abin0 -> 6359542 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcov.abin0 -> 313008 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcc.abin0 -> 6354322 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcov.abin0 -> 313140 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/crtbegin.obin0 -> 2580 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginS.obin0 -> 2796 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginT.obin0 -> 2580 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/crtend.obin0 -> 1097 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/crtendS.obin0 -> 1097 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.c1088
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.h489
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-iov.h4
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/libgcov-driver.c1193
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/README14
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/limits.h171
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/linux/a.out.h229
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/stdio.h441
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/syslimits.h8
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_acle.h100
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_neon.h13817
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/float.h277
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/iso646.h45
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/mmintrin.h1836
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/omp.h127
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdalign.h39
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdarg.h126
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdatomic.h252
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdbool.h50
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stddef.h439
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdfix.h204
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint-gcc.h263
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint.h14
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdnoreturn.h35
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind-arm-common.h250
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind.h85
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/include/varargs.h7
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/libgcc.abin0 -> 6335246 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/libgcov.abin0 -> 318324 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbegin.obin0 -> 2472 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginS.obin0 -> 2680 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginT.obin0 -> 2472 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtend.obin0 -> 1097 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtendS.obin0 -> 1097 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcc.abin0 -> 6346014 bytes
-rw-r--r--lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcov.abin0 -> 313684 bytes
-rw-r--r--lib/libarm-linux-android-sim.abin0 -> 371454 bytes
-rw-r--r--lib64/libiberty.abin0 -> 439048 bytes
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/cc1bin0 -> 16259928 bytes
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/cc1plusbin0 -> 17393176 bytes
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/collect2bin0 -> 442632 bytes
l---------libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so1
l---------libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.01
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.0.0.0bin0 -> 43840 bytes
l---------libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so1
l---------libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.01
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.0.0.0bin0 -> 87712 bytes
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/lto-wrapperbin0 -> 635048 bytes
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/lto1bin0 -> 15521016 bytes
-rwxr-xr-xlibexec/gcc/arm-linux-androideabi/4.9.x-google/plugin/gengtypebin0 -> 576880 bytes
-rw-r--r--share/gdb/python/gdb/__init__.py124
-rw-r--r--share/gdb/python/gdb/command/__init__.py16
-rw-r--r--share/gdb/python/gdb/command/explore.py760
-rw-r--r--share/gdb/python/gdb/command/pretty_printers.py368
-rw-r--r--share/gdb/python/gdb/command/prompt.py66
-rw-r--r--share/gdb/python/gdb/command/type_printers.py125
-rw-r--r--share/gdb/python/gdb/function/__init__.py14
-rw-r--r--share/gdb/python/gdb/function/strfns.py108
-rw-r--r--share/gdb/python/gdb/printing.py263
-rw-r--r--share/gdb/python/gdb/prompt.py148
-rw-r--r--share/gdb/python/gdb/types.py176
-rw-r--r--share/gdb/syscalls/amd64-linux.xml314
-rw-r--r--share/gdb/syscalls/gdb-syscalls.dtd14
-rw-r--r--share/gdb/syscalls/i386-linux.xml340
-rw-r--r--share/gdb/syscalls/mips-n32-linux.xml319
-rw-r--r--share/gdb/syscalls/mips-n64-linux.xml312
-rw-r--r--share/gdb/syscalls/mips-o32-linux.xml347
-rw-r--r--share/gdb/syscalls/ppc-linux.xml310
-rw-r--r--share/gdb/syscalls/ppc64-linux.xml295
-rw-r--r--share/gdb/syscalls/sparc-linux.xml344
-rw-r--r--share/gdb/syscalls/sparc64-linux.xml326
-rw-r--r--toolchain.mk17
193 files changed, 33034 insertions, 0 deletions
diff --git a/.Android.mk.un~ b/.Android.mk.un~
new file mode 100644
index 0000000..a480117
--- /dev/null
+++ b/.Android.mk.un~
Binary files differ
diff --git a/.toolchain.mk.un~ b/.toolchain.mk.un~
new file mode 100644
index 0000000..fda86bf
--- /dev/null
+++ b/.toolchain.mk.un~
Binary files differ
diff --git a/Android.mk b/Android.mk
new file mode 100644
index 0000000..1cc2237
--- /dev/null
+++ b/Android.mk
@@ -0,0 +1,16 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Empty makefile here prevents Android.mk in subdirectories from being
+# unconditionally included.
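Note: in the AOSP platform build of this era, makefile discovery (build/tools/findleaves.py) takes the first Android.mk it finds on each directory path and does not descend past it, so this intentionally empty file shadows any Android.mk deeper in the prebuilt tree. As a hypothetical illustration only (not part of this commit), a non-empty container makefile would normally pull its subdirectories in like this, which is exactly what the empty file avoids:

# Hypothetical top-level Android.mk, shown only to illustrate the inclusion
# pattern that the empty placeholder above suppresses.
LOCAL_PATH := $(call my-dir)

# A normal container makefile would gather every Android.mk beneath it;
# shipping an empty file instead keeps the prebuilt subtree out of the build.
include $(call all-makefiles-under,$(LOCAL_PATH))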
diff --git a/arm-linux-androideabi/bin/ar b/arm-linux-androideabi/bin/ar
new file mode 120000
index 0000000..8c48646
--- /dev/null
+++ b/arm-linux-androideabi/bin/ar
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-ar
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/as b/arm-linux-androideabi/bin/as
new file mode 120000
index 0000000..5378e97
--- /dev/null
+++ b/arm-linux-androideabi/bin/as
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-as
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/ld b/arm-linux-androideabi/bin/ld
new file mode 120000
index 0000000..dddda83
--- /dev/null
+++ b/arm-linux-androideabi/bin/ld
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-ld
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/ld.bfd b/arm-linux-androideabi/bin/ld.bfd
new file mode 120000
index 0000000..57728a0
--- /dev/null
+++ b/arm-linux-androideabi/bin/ld.bfd
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-ld.bfd
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/ld.gold b/arm-linux-androideabi/bin/ld.gold
new file mode 120000
index 0000000..266c472
--- /dev/null
+++ b/arm-linux-androideabi/bin/ld.gold
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-ld.gold
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/nm b/arm-linux-androideabi/bin/nm
new file mode 120000
index 0000000..03e24b6
--- /dev/null
+++ b/arm-linux-androideabi/bin/nm
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-nm
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/objcopy b/arm-linux-androideabi/bin/objcopy
new file mode 120000
index 0000000..2ec109f
--- /dev/null
+++ b/arm-linux-androideabi/bin/objcopy
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-objcopy
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/objdump b/arm-linux-androideabi/bin/objdump
new file mode 120000
index 0000000..2303680
--- /dev/null
+++ b/arm-linux-androideabi/bin/objdump
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-objdump
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/ranlib b/arm-linux-androideabi/bin/ranlib
new file mode 120000
index 0000000..7cf5d4f
--- /dev/null
+++ b/arm-linux-androideabi/bin/ranlib
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-ranlib
\ No newline at end of file
diff --git a/arm-linux-androideabi/bin/strip b/arm-linux-androideabi/bin/strip
new file mode 120000
index 0000000..3daf7a6
--- /dev/null
+++ b/arm-linux-androideabi/bin/strip
@@ -0,0 +1 @@
+../../bin/arm-linux-androideabi-strip
\ No newline at end of file
diff --git a/arm-linux-androideabi/lib/armv7-a/hard/libatomic.a b/arm-linux-androideabi/lib/armv7-a/hard/libatomic.a
new file mode 100644
index 0000000..1d38efe
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/hard/libatomic.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/hard/libgomp.a b/arm-linux-androideabi/lib/armv7-a/hard/libgomp.a
new file mode 100644
index 0000000..3978f63
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/hard/libgomp.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/hard/libgomp.spec b/arm-linux-androideabi/lib/armv7-a/hard/libgomp.spec
new file mode 100644
index 0000000..2fd7721
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/hard/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking. It is used to specify the
+# standard libraries we need in order to link with libgomp.
+*link_gomp: -lgomp
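Note: this three-line spec (repeated below for each multilib directory) is what makes a plain -fopenmp link work against these prebuilts. When -fopenmp is passed, the gcc driver reads libgomp.spec from the library search path of the selected multilib and splices the *link_gomp rule into its link command, so -lgomp is added without the caller naming it. A minimal sketch, assuming the toolchain sits at its usual prebuilts/ location and that an omp_test.c source exists (both are assumptions, not taken from this commit):

# Illustrative only: TOOLCHAIN, the source file and the flags are assumptions.
TOOLCHAIN := prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9
CC        := $(TOOLCHAIN)/bin/arm-linux-androideabi-gcc

omp_test: omp_test.c
	# -fopenmp enables the OpenMP pragmas and, at link time, causes the
	# driver to read libgomp.spec, whose *link_gomp entry supplies -lgomp.
	$(CC) -march=armv7-a -fopenmp -o $@ $<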
diff --git a/arm-linux-androideabi/lib/armv7-a/libatomic.a b/arm-linux-androideabi/lib/armv7-a/libatomic.a
new file mode 100644
index 0000000..cff305d
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/libatomic.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/libgomp.a b/arm-linux-androideabi/lib/armv7-a/libgomp.a
new file mode 100644
index 0000000..449462a
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/libgomp.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/libgomp.spec b/arm-linux-androideabi/lib/armv7-a/libgomp.spec
new file mode 100644
index 0000000..2fd7721
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking. It is used to specify the
+# standard libraries we need in order to link with libgomp.
+*link_gomp: -lgomp
diff --git a/arm-linux-androideabi/lib/armv7-a/thumb/hard/libatomic.a b/arm-linux-androideabi/lib/armv7-a/thumb/hard/libatomic.a
new file mode 100644
index 0000000..eba8869
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/thumb/hard/libatomic.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.a b/arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.a
new file mode 100644
index 0000000..6dbab06
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.spec b/arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.spec
new file mode 100644
index 0000000..2fd7721
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/thumb/hard/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking. It is used to specify the
+# standard libraries we need in order to link with libgomp.
+*link_gomp: -lgomp
diff --git a/arm-linux-androideabi/lib/armv7-a/thumb/libatomic.a b/arm-linux-androideabi/lib/armv7-a/thumb/libatomic.a
new file mode 100644
index 0000000..f4eae27
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/thumb/libatomic.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/thumb/libgomp.a b/arm-linux-androideabi/lib/armv7-a/thumb/libgomp.a
new file mode 100644
index 0000000..efc2811
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/thumb/libgomp.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/armv7-a/thumb/libgomp.spec b/arm-linux-androideabi/lib/armv7-a/thumb/libgomp.spec
new file mode 100644
index 0000000..2fd7721
--- /dev/null
+++ b/arm-linux-androideabi/lib/armv7-a/thumb/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking. It is used to specify the
+# standard libraries we need in order to link with libgomp.
+*link_gomp: -lgomp
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x
new file mode 100644
index 0000000..5a430fd
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.x
@@ -0,0 +1,243 @@
+/* Default linker script, for normal executables */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
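Note: the ldscripts/ files added here are the stock BFD linker scripts for this target; ld.bfd picks one automatically from the link options (.x for the default executable link, .xbn for -N, .xc for -z combreloc, .xd and .xdc for -pie, and so on, per each script's header comment), so they are rarely named explicitly. A small sketch of how the selection can be inspected or overridden with this toolchain; the target name and output file are illustrative assumptions:

# Illustrative only; assumes this toolchain's bin/ directory is on PATH.
LD := arm-linux-androideabi-ld

# --verbose dumps the built-in default script, which corresponds to
# armelf_linux_eabi.x above; -T <script> (or -Wl,-T,<script> via gcc)
# substitutes one of the ldscripts/ files for that default.
dump-default-script:
	$(LD) --verbose > default.ld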
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn
new file mode 100644
index 0000000..f7d60a5
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xbn
@@ -0,0 +1,240 @@
+/* Script for -N: mix text and data on same page; don't align data */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = .;
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc
new file mode 100644
index 0000000..dd51ec6
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xc
@@ -0,0 +1,245 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd
new file mode 100644
index 0000000..90d22a2
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xd
@@ -0,0 +1,242 @@
+/* Script for ld -pie: link position independent executable */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc
new file mode 100644
index 0000000..c1242ea
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdc
@@ -0,0 +1,245 @@
+/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
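
Note: the KEEP()'d .init_array/.fini_array output sections in the script above are bracketed by the PROVIDE_HIDDEN symbols __init_array_start/__init_array_end (and the __fini_array_* pair), which the C runtime typically walks at startup and shutdown. As a rough sketch only, assuming a program linked with one of these default scripts (the function and variable names below are invented for the example), the same table can be inspected from the program itself:

    #include <stddef.h>
    #include <stdio.h>

    /* Defined by PROVIDE_HIDDEN in the linker script; hidden visibility
       still resolves inside the executable that the script produced. */
    extern void (*__init_array_start[])(void);
    extern void (*__init_array_end[])(void);

    __attribute__((constructor))
    static void example_ctor(void)          /* illustrative name */
    {
        puts("collected into .init_array by the KEEP(...) rules");
    }

    int main(void)
    {
        /* Number of initializer pointers the linker gathered. */
        size_t n = (size_t)(__init_array_end - __init_array_start);
        printf("%zu entries in .init_array\n", n);
        return 0;
    }

When building with -ffunction-sections/-fdata-sections and linking with --gc-sections, the KEEP() directives are what keep such entries from being discarded as unreferenced.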
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw
new file mode 100644
index 0000000..2cb001d
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xdw
@@ -0,0 +1,245 @@
+/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
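
Note: the _edata/edata, __bss_start/__bss_start__ and _end/end assignments above bracket the initialized-data and zero-initialized regions of the image. A minimal sketch, assuming a program linked against one of these scripts (the printing logic is purely illustrative):

    #include <stdio.h>

    extern char _edata[];       /* end of initialized data (script: _edata = .) */
    extern char __bss_start[];  /* start of .bss; normally equal to _edata here */
    extern char _end[];         /* end of .bss, i.e. end of the loaded image    */

    int main(void)
    {
        printf(".bss spans %p .. %p (%ld bytes, zero-filled by the loader)\n",
               (void *)__bss_start, (void *)_end,
               (long)(_end - __bss_start));
        return 0;
    }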
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn
new file mode 100644
index 0000000..0f2c5eb
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xn
@@ -0,0 +1,242 @@
+/* Script for -n: mix text and data on same page */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
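
Note: the KEEP (*(.preinit_array)) rule above preserves pre-initializers, which run before the .init_array constructors in executables (DT_PREINIT_ARRAY is not honored for shared objects). A hedged sketch of how such an entry is commonly emitted from C; the names are invented, and the section/used attributes are an assumption about the usual GCC pattern rather than anything this toolchain mandates:

    #include <stdio.h>

    static void pre_init(int argc, char **argv, char **envp)
    {
        (void)argc; (void)argv; (void)envp;
        puts("runs from .preinit_array, before .init_array constructors");
    }

    /* Place a function pointer directly into .preinit_array; "used" keeps
       the otherwise-unreferenced pointer alive through garbage collection. */
    __attribute__((section(".preinit_array"), used))
    static void (*pre_init_entry)(int, char **, char **) = pre_init;

    int main(void) { return 0; }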
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr
new file mode 100644
index 0000000..3b5a7d1
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xr
@@ -0,0 +1,162 @@
+/* Script for ld -r: link without relocation */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero vmas works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0: { *(.gnu.version_d) }
+ .gnu.version_r 0: { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs
new file mode 100644
index 0000000..6f628fe
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xs
@@ -0,0 +1,231 @@
+/* Script for ld --shared: link shared library */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
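
Note: the .tdata/.tbss rules above collect the thread-local storage template of the output. A short sketch of where those input sections come from (variable names are illustrative): each initialized __thread object is emitted into .tdata and each zero-initialized one into .tbss, which the script then merges into the TLS segment.

    #include <stdio.h>

    __thread int per_thread_counter = 42;  /* initialized TLS  -> .tdata */
    __thread char per_thread_buf[64];      /* zero-initialized -> .tbss  */

    int main(void)
    {
        per_thread_counter++;              /* each thread gets its own copy */
        printf("counter=%d buf[0]=%d\n", per_thread_counter, per_thread_buf[0]);
        return 0;
    }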
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc
new file mode 100644
index 0000000..f2f347c
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsc
@@ -0,0 +1,231 @@
+/* Script for --shared -z combreloc: shared library, combine & sort relocs */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw
new file mode 100644
index 0000000..4baac17
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xsw
@@ -0,0 +1,230 @@
+/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
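
Note: each of these scripts brackets the ARM unwind index with PROVIDE_HIDDEN (__exidx_start/__exidx_end), which is how the unwinder locates .ARM.exidx at run time. As an illustrative check only (the ARM EHABI defines each index entry as two 32-bit words; the struct name below is invented):

    #include <stdint.h>
    #include <stdio.h>

    struct exidx_entry { uint32_t fn_offset; uint32_t content; };  /* 8 bytes each */

    extern struct exidx_entry __exidx_start[];  /* provided by the linker script */
    extern struct exidx_entry __exidx_end[];

    int main(void)
    {
        long n = (long)(__exidx_end - __exidx_start);
        printf("%ld entries in .ARM.exidx\n", n);
        return 0;
    }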
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu
new file mode 100644
index 0000000..0a24334
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xu
@@ -0,0 +1,163 @@
+/* Script for ld -Ur: link w/out relocation, do create constructors */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero vmas works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0: { *(.gnu.version_d) }
+ .gnu.version_r 0: { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
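
Note: the .tdata and .tbss output sections kept at address 0 in this relocatable (-Ur) script collect thread-local storage. A minimal, hypothetical C sketch using GCC's __thread, showing which input section each variable is emitted into; the variable names are illustrative:

    /* Hypothetical sketch: initialized thread-local data goes to .tdata and
       zero-initialized thread-local data to .tbss, the TLS sections gathered
       by the script above. */
    #include <stdio.h>

    static __thread int tls_counter = 42;   /* has an initializer -> .tdata */
    static __thread int tls_scratch;        /* zero-initialized   -> .tbss  */

    int main(void)
    {
        tls_scratch = tls_counter + 1;
        printf("%d %d\n", tls_counter, tls_scratch);
        return 0;
    }
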
diff --git a/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw
new file mode 100644
index 0000000..626ffd4
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelf_linux_eabi.xw
@@ -0,0 +1,245 @@
+/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
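
Note: this -z combreloc -z now -z relro variant groups .data.rel.ro, .dynamic and .got ahead of DATA_SEGMENT_RELRO_END so the dynamic linker can remap them read-only once relocation is finished. A hedged C sketch of data that typically ends up in .data.rel.ro when built position-independent; exact placement depends on the compiler and flags, and the names are illustrative:

    /* Hypothetical sketch: a const table of pointers is read-only to the
       program but still needs load-time relocation, so with -fPIC/-fPIE it is
       typically placed in .data.rel.ro and covered by the RELRO segment. */
    #include <stdio.h>

    static const char *const greetings[] = {
        "hello",
        "bonjour",
        "hallo",
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof greetings / sizeof greetings[0]; i++)
            puts(greetings[i]);
        return 0;
    }
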
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x
new file mode 100644
index 0000000..cc8f8cc
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.x
@@ -0,0 +1,243 @@
+/* Default linker script, for normal executables */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
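
Note: the script above defines __bss_start before the .bss output section and _end (plus the end/__end__ aliases) after it, so a program can measure its own zero-initialized region. A minimal, hedged C sketch; the reported byte count includes any trailing alignment padding:

    /* Hypothetical sketch: __bss_start and _end are defined by the linker
       script, not by any C file; referencing them imports those addresses. */
    #include <stdio.h>
    #include <stddef.h>

    extern char __bss_start[];   /* "__bss_start = .;" before .bss */
    extern char _end[];          /* "_end = .;" after .bss         */

    int main(void)
    {
        printf("bss spans roughly %zu bytes\n", (size_t)(_end - __bss_start));
        return 0;
    }
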
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn
new file mode 100644
index 0000000..09e0ff5
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xbn
@@ -0,0 +1,240 @@
+/* Script for -N: mix text and data on same page; don't align data */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = .;
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
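
Note: like the other variants, this -N script PROVIDEs the traditional etext, edata and end symbols, so they are defined only when an input object references them without defining them itself. A small C sketch in the style of the classic end(3) example:

    /* Hypothetical sketch: etext, edata and end mark the first addresses past
       the text, initialized data and bss regions laid out by the script. */
    #include <stdio.h>

    extern char etext, edata, end;

    int main(void)
    {
        printf("etext: %p\n", (void *)&etext);
        printf("edata: %p\n", (void *)&edata);
        printf("end:   %p\n", (void *)&end);
        return 0;
    }
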
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc
new file mode 100644
index 0000000..90c26d0
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xc
@@ -0,0 +1,245 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
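
Note: the hidden __exidx_start/__exidx_end symbols bracket the .ARM.exidx output section; the unwinder uses them to locate the exception index table, whose entries are two 32-bit words each per the ARM EHABI. A hedged C sketch that counts the entries; treat the declarations as an assumption rather than a documented API:

    /* Hypothetical sketch: __exidx_start and __exidx_end are provided by the
       linker script around .ARM.exidx; each index entry is two 32-bit words. */
    #include <stdio.h>
    #include <stdint.h>

    extern const uint32_t __exidx_start[];
    extern const uint32_t __exidx_end[];

    int main(void)
    {
        size_t entries = (size_t)(__exidx_end - __exidx_start) / 2;
        printf("%zu unwind index entries\n", entries);
        return 0;
    }
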
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd
new file mode 100644
index 0000000..5c70779
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xd
@@ -0,0 +1,242 @@
+/* Script for ld -pie: link position independent executable */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
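
Note: in this -pie variant the link-time base is 0, but the script still provides the usual data-boundary symbols: __data_start at the top of .data and edata/_edata just past .data1. A minimal, hedged C sketch that sizes the initialized data image from them:

    /* Hypothetical sketch: __data_start and edata come from the linker script
       and bracket the initialized .data/.data1 output sections. */
    #include <stdio.h>
    #include <stddef.h>

    extern char __data_start[];   /* PROVIDE (__data_start = .) at the top of .data */
    extern char edata[];          /* PROVIDE (edata = .) just past .data1           */

    int main(void)
    {
        printf("initialized data: %zu bytes\n", (size_t)(edata - __data_start));
        return 0;
    }
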
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc
new file mode 100644
index 0000000..3449f5b
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdc
@@ -0,0 +1,245 @@
+/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
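
The script above brackets the collected .init_array/.ctors input with hidden markers (PROVIDE_HIDDEN of __init_array_start/__init_array_end, and the matching __fini_array_* pair). As a rough sketch of how those bounds get consumed, here is a minimal C loop in the style of a libc startup routine; the function name and structure are illustrative only, not Bionic's actual code:

    #include <stddef.h>

    typedef void (*init_fn)(void);

    /* Both arrays are defined by the linker script (PROVIDE_HIDDEN), so
       they are visible inside the module being linked. */
    extern init_fn __init_array_start[];
    extern init_fn __init_array_end[];

    static void run_init_array(void)
    {
        /* Call each registered initializer in link order. */
        for (init_fn *fn = __init_array_start; fn != __init_array_end; ++fn)
            (*fn)();
    }
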
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw
new file mode 100644
index 0000000..e46572a
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xdw
@@ -0,0 +1,245 @@
+/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0); . = 0 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
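
Each variant sorts prioritized constructor input with SORT_BY_INIT_PRIORITY(.init_array.* / .ctors.*) ahead of the plain .init_array/.ctors sections. Below is a hedged C illustration of what typically feeds those inputs; the exact section names this GCC emits for prioritized constructors are an assumption about its configuration, and the program only shows the resulting run order:

    #include <stdio.h>

    /* GCC's constructor-priority attribute; priorities up to 100 are
       reserved for the implementation, and lower numbers run earlier. */
    __attribute__((constructor(101)))
    static void early_init(void)   { puts("prioritized ctor, runs first"); }

    __attribute__((constructor(65000)))
    static void late_init(void)    { puts("prioritized ctor, runs second"); }

    __attribute__((constructor))   /* unprioritized: plain .init_array */
    static void default_init(void) { puts("unprioritized ctor, runs last of the three"); }

    int main(void) { puts("main"); return 0; }
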
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn
new file mode 100644
index 0000000..6f8775b
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xn
@@ -0,0 +1,242 @@
+/* Script for -n: mix text and data on same page */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .rela.iplt :
+ {
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
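
All of these scripts define both the plain and the double-underscore BSS bounds (__bss_start, __bss_start__, _end, __bss_end__, and so on). On Android the kernel and dynamic loader zero that region, so the following is only a sketch of the classic bare-metal idiom those symbols exist for; nothing in these prebuilts does this from C:

    #include <string.h>

    extern char __bss_start[];  /* start of zero-initialized data (linker script) */
    extern char _end[];         /* end of the image (linker script) */

    static void clear_bss(void)
    {
        /* Zero everything between the two script-provided markers.
           Real startup code does this before any C with static state runs. */
        memset(__bss_start, 0, (size_t)(_end - __bss_start));
    }
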
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr
new file mode 100644
index 0000000..c4bf01b
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xr
@@ -0,0 +1,162 @@
+/* Script for ld -r: link without relocation */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero vmas works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0: { *(.gnu.version_d) }
+ .gnu.version_r 0: { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs
new file mode 100644
index 0000000..ceaa0c4
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xs
@@ -0,0 +1,231 @@
+/* Script for ld --shared: link shared library */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rela.data.rel.ro : { *(.rela.data.rel.ro .rela.data.rel.ro.* .rela.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+ .rel.iplt :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
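
The trailing assignments in each script define the traditional UNIX layout symbols: etext/edata/end are PROVIDEd, so they exist only if the program refers to them, while _edata and _end are assigned unconditionally. A short, self-contained sketch of the classic usage (addresses only, nothing is dereferenced):

    #include <stdio.h>

    extern char etext, edata, end;  /* provided by the linker script when referenced */

    int main(void)
    {
        printf("end of text:             %p\n", (void *)&etext);
        printf("end of initialized data: %p\n", (void *)&edata);
        printf("end of the image:        %p\n", (void *)&end);
        return 0;
    }
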
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc
new file mode 100644
index 0000000..3e563a1
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsc
@@ -0,0 +1,231 @@
+/* Script for --shared -z combreloc: shared library, combine & sort relocs */
+/* Modified for Android. */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
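
The __exidx_start/__exidx_end pair placed around .ARM.exidx above is what the ARM EHABI unwinder in libgcc uses to locate the exception index table. A hedged sketch that merely counts entries (each index entry is two 32-bit words); it is illustrative and not part of any unwinder:

    #include <stdint.h>
    #include <stdio.h>

    /* One .ARM.exidx entry: a prel31 function offset plus either an inline
       unwind description or a reference into .ARM.extab. */
    struct exidx_entry { uint32_t fn_offset; uint32_t content; };

    extern struct exidx_entry __exidx_start[];  /* from the linker script */
    extern struct exidx_entry __exidx_end[];    /* from the linker script */

    int main(void)
    {
        printf("%ld unwind index entries\n",
               (long)(__exidx_end - __exidx_start));
        return 0;
    }
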
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw
new file mode 100644
index 0000000..6c384fa
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xsw
@@ -0,0 +1,230 @@
+/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = 0 + SIZEOF_HEADERS;
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ *(.rel.iplt)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ *(.rela.iplt)
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
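
Every script also KEEPs .preinit_array input. A loosely hedged sketch of the pattern that populates it from C follows; the attribute-based registration is a common GCC/Clang idiom, but whether a given Android binary type actually runs DT_PREINIT_ARRAY entries is a property of the C library, not of these linker scripts:

    #include <stdio.h>

    /* Real preinit entries may be passed argc/argv/envp like main; the
       prototype here is simplified for illustration. */
    static void pre_main_hook(void)
    {
        puts("preinit hook: runs before .init_array constructors");
    }

    /* Drop a function pointer into the .preinit_array section that the
       scripts above KEEP; 'used' stops the compiler from discarding it. */
    __attribute__((section(".preinit_array"), used))
    static void (*preinit_hook_ptr)(void) = pre_main_hook;

    int main(void) { puts("main"); return 0; }
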
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu
new file mode 100644
index 0000000..1992fd3
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xu
@@ -0,0 +1,163 @@
+/* Script for ld -Ur: link w/out relocation, do create constructors */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ /* For some reason, the Solaris linker makes bad executables
+ if gld -r is used and the intermediate file has sections starting
+ at non-zero addresses. Could be a Solaris ld bug, could be a GNU ld
+ bug. But for now assigning the zero vmas works. */
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .interp 0 : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash 0 : { *(.hash) }
+ .gnu.hash 0 : { *(.gnu.hash) }
+ .dynsym 0 : { *(.dynsym) }
+ .dynstr 0 : { *(.dynstr) }
+ .gnu.version 0 : { *(.gnu.version) }
+ .gnu.version_d 0: { *(.gnu.version_d) }
+ .gnu.version_r 0: { *(.gnu.version_r) }
+ .rel.init 0 : { *(.rel.init) }
+ .rela.init 0 : { *(.rela.init) }
+ .rel.text 0 : { *(.rel.text) }
+ .rela.text 0 : { *(.rela.text) }
+ .rel.fini 0 : { *(.rel.fini) }
+ .rela.fini 0 : { *(.rela.fini) }
+ .rel.rodata 0 : { *(.rel.rodata) }
+ .rela.rodata 0 : { *(.rela.rodata) }
+ .rel.data.rel.ro 0 : { *(.rel.data.rel.ro) }
+ .rela.data.rel.ro 0 : { *(.rela.data.rel.ro) }
+ .rel.data 0 : { *(.rel.data) }
+ .rela.data 0 : { *(.rela.data) }
+ .rel.tdata 0 : { *(.rel.tdata) }
+ .rela.tdata 0 : { *(.rela.tdata) }
+ .rel.tbss 0 : { *(.rel.tbss) }
+ .rela.tbss 0 : { *(.rela.tbss) }
+ .rel.ctors 0 : { *(.rel.ctors) }
+ .rela.ctors 0 : { *(.rela.ctors) }
+ .rel.dtors 0 : { *(.rel.dtors) }
+ .rela.dtors 0 : { *(.rela.dtors) }
+ .rel.got 0 : { *(.rel.got) }
+ .rela.got 0 : { *(.rela.got) }
+ .rel.bss 0 : { *(.rel.bss) }
+ .rela.bss 0 : { *(.rela.bss) }
+ .rel.iplt 0 :
+ {
+ *(.rel.iplt)
+ }
+ .rela.iplt 0 :
+ {
+ *(.rela.iplt)
+ }
+ .rel.plt 0 :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt 0 :
+ {
+ *(.rela.plt)
+ }
+ .init 0 :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt 0 : { *(.plt) }
+ .iplt 0 : { *(.iplt) }
+ .text 0 :
+ {
+ *(.text .stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .fini 0 :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ .rodata 0 : { *(.rodata) }
+ .rodata1 0 : { *(.rodata1) }
+ .ARM.extab 0 : { *(.ARM.extab) }
+ .ARM.exidx 0 : { *(.ARM.exidx) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ /* Exception handling */
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata 0 : { *(.tdata) }
+ .tbss 0 : { *(.tbss) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ .preinit_array 0 :
+ {
+ KEEP (*(.preinit_array))
+ }
+ .jcr 0 : { KEEP (*(.jcr)) }
+ .dynamic 0 : { *(.dynamic) }
+ .got 0 : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ .data 0 :
+ {
+ *(.data)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 0 : { *(.data1) }
+ .bss 0 :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+}
diff --git a/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw
new file mode 100644
index 0000000..c76c991
--- /dev/null
+++ b/arm-linux-androideabi/lib/ldscripts/armelfb_linux_eabi.xw
@@ -0,0 +1,245 @@
+/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x00008000); . = 0x00008000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro .rel.data.rel.ro.* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ PROVIDE_HIDDEN (__rela_iplt_start = .);
+ *(.rela.iplt)
+ PROVIDE_HIDDEN (__rela_iplt_end = .);
+ }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ }
+ .rela.plt :
+ {
+ *(.rela.plt)
+ }
+ .init :
+ {
+ KEEP (*(SORT_NONE(.init)))
+ }
+ .plt : { *(.plt) }
+ .iplt : { *(.iplt) }
+ .text :
+ {
+ *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+ *(.text.exit .text.exit.*)
+ *(.text.startup .text.startup.*)
+ *(.text.hot .text.hot.*)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+ }
+ .fini :
+ {
+ KEEP (*(SORT_NONE(.fini)))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .ARM.extab : { *(.ARM.extab* .gnu.linkonce.armextab.*) }
+ PROVIDE_HIDDEN (__exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
+ PROVIDE_HIDDEN (__exidx_end = .);
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* These sections are generated by the Sun/Oracle C++ compiler. */
+ .exception_ranges : ONLY_IF_RO { *(.exception_ranges
+ .exception_ranges*) }
+ /* Adjust the address for the data segment. For 32 bits we want to align
+ at exactly a page boundary to make life easier for apriori. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ /* Ensure the __preinit_array_start label is properly aligned. We
+ could instead move the label definition inside the section, but
+ the linker would then create the section even if it turns out to
+ be empty, which isn't pretty. */
+ . = ALIGN(32 / 8);
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ .preinit_array :
+ {
+ KEEP (*(.preinit_array))
+ }
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ PROVIDE_HIDDEN (__init_array_start = .);
+ .init_array :
+ {
+ KEEP (*crtbegin*.o(.init_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.init_array.*) SORT_BY_INIT_PRIORITY(.ctors.*)))
+ KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .ctors))
+ }
+ PROVIDE_HIDDEN (__init_array_end = .);
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ .fini_array :
+ {
+ KEEP (*crtbegin*.o(.fini_array))
+ KEEP (*(SORT_BY_INIT_PRIORITY(.fini_array.*) SORT_BY_INIT_PRIORITY(.dtors.*)))
+ KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin*.o *crtend.o *crtend*.o ) .dtors))
+ }
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin*.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin*.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend*.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got.plt) *(.igot.plt) *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (0, .);
+ .data :
+ {
+ PROVIDE (__data_start = .);
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ . = .;
+ __bss_start = .;
+ __bss_start__ = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ _bss_end__ = . ; __bss_end__ = . ;
+ . = ALIGN(32 / 8);
+ . = SEGMENT_START("ldata-segment", .);
+ . = ALIGN(32 / 8);
+ __end__ = . ;
+ _end = .;
+ _bss_end__ = . ; __bss_end__ = . ; __end__ = . ;
+ PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ /* DWARF Extension. */
+ .debug_macro 0 : { *(.debug_macro) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) *(.mdebug.*) }
+}
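The linker scripts above PROVIDE a set of image-layout symbols (__executable_start, etext, __bss_start, _end, and so on). Purely as a hedged illustration (none of this is part of the prebuilt drop), C code linked against one of these scripts can reference those symbols as externally defined arrays; the symbol names below come straight from the script, while the program itself is hypothetical:

/* Minimal sketch: print the addresses the linker script assigned. */
#include <stdio.h>
#include <stdint.h>

extern char __executable_start[];  /* PROVIDE (__executable_start = 0x00008000) */
extern char __bss_start[];         /* __bss_start = . */
extern char _end[];                /* _end = . / PROVIDE (end = .) */

int main (void)
{
  printf ("text base : %p\n", (void *) __executable_start);
  printf ("bss start : %p\n", (void *) __bss_start);
  printf ("image end : %p\n", (void *) _end);
  printf ("bss bytes : ~%lu\n",
          (unsigned long) ((uintptr_t) _end - (uintptr_t) __bss_start));
  return 0;
}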
diff --git a/arm-linux-androideabi/lib/libatomic.a b/arm-linux-androideabi/lib/libatomic.a
new file mode 100644
index 0000000..d4a7e40
--- /dev/null
+++ b/arm-linux-androideabi/lib/libatomic.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/libgomp.a b/arm-linux-androideabi/lib/libgomp.a
new file mode 100644
index 0000000..fb61f19
--- /dev/null
+++ b/arm-linux-androideabi/lib/libgomp.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/libgomp.spec b/arm-linux-androideabi/lib/libgomp.spec
new file mode 100644
index 0000000..2fd7721
--- /dev/null
+++ b/arm-linux-androideabi/lib/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking. It is used to specify the
+# standard libraries we need in order to link with libgomp.
+*link_gomp: -lgomp
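As a hedged usage note (nothing below ships in this change): when the driver is invoked with -fopenmp, gcc's link spec pulls in this libgomp.spec and expands *link_gomp, here to -lgomp, so an OpenMP program links against libgomp without naming it explicitly. A minimal sketch, with the file name omp_hello.c chosen only for illustration:

/* omp_hello.c: built with something like
     arm-linux-androideabi-gcc -fopenmp omp_hello.c -o omp_hello
   the spec above supplies -lgomp at link time. */
#include <omp.h>
#include <stdio.h>

int main (void)
{
#pragma omp parallel
  printf ("hello from thread %d of %d\n",
          omp_get_thread_num (), omp_get_num_threads ());
  return 0;
}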
diff --git a/arm-linux-androideabi/lib/thumb/libatomic.a b/arm-linux-androideabi/lib/thumb/libatomic.a
new file mode 100644
index 0000000..a65d96c
--- /dev/null
+++ b/arm-linux-androideabi/lib/thumb/libatomic.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/thumb/libgomp.a b/arm-linux-androideabi/lib/thumb/libgomp.a
new file mode 100644
index 0000000..0f43f02
--- /dev/null
+++ b/arm-linux-androideabi/lib/thumb/libgomp.a
Binary files differ
diff --git a/arm-linux-androideabi/lib/thumb/libgomp.spec b/arm-linux-androideabi/lib/thumb/libgomp.spec
new file mode 100644
index 0000000..2fd7721
--- /dev/null
+++ b/arm-linux-androideabi/lib/thumb/libgomp.spec
@@ -0,0 +1,3 @@
+# This spec file is read by gcc when linking. It is used to specify the
+# standard libraries we need in order to link with libgomp.
+*link_gomp: -lgomp
diff --git a/bin/arm-linux-androideabi-addr2line b/bin/arm-linux-androideabi-addr2line
new file mode 100755
index 0000000..e10ac55
--- /dev/null
+++ b/bin/arm-linux-androideabi-addr2line
Binary files differ
diff --git a/bin/arm-linux-androideabi-ar b/bin/arm-linux-androideabi-ar
new file mode 100755
index 0000000..f63d4a9
--- /dev/null
+++ b/bin/arm-linux-androideabi-ar
Binary files differ
diff --git a/bin/arm-linux-androideabi-as b/bin/arm-linux-androideabi-as
new file mode 100755
index 0000000..55a80f4
--- /dev/null
+++ b/bin/arm-linux-androideabi-as
Binary files differ
diff --git a/bin/arm-linux-androideabi-c++ b/bin/arm-linux-androideabi-c++
new file mode 120000
index 0000000..818bae6
--- /dev/null
+++ b/bin/arm-linux-androideabi-c++
@@ -0,0 +1 @@
+arm-linux-androideabi-g++
\ No newline at end of file
diff --git a/bin/arm-linux-androideabi-c++filt b/bin/arm-linux-androideabi-c++filt
new file mode 100755
index 0000000..e335886
--- /dev/null
+++ b/bin/arm-linux-androideabi-c++filt
Binary files differ
diff --git a/bin/arm-linux-androideabi-cpp b/bin/arm-linux-androideabi-cpp
new file mode 100755
index 0000000..c4649af
--- /dev/null
+++ b/bin/arm-linux-androideabi-cpp
Binary files differ
diff --git a/bin/arm-linux-androideabi-dwp b/bin/arm-linux-androideabi-dwp
new file mode 100755
index 0000000..00343b3
--- /dev/null
+++ b/bin/arm-linux-androideabi-dwp
Binary files differ
diff --git a/bin/arm-linux-androideabi-elfedit b/bin/arm-linux-androideabi-elfedit
new file mode 100755
index 0000000..8eedeab
--- /dev/null
+++ b/bin/arm-linux-androideabi-elfedit
Binary files differ
diff --git a/bin/arm-linux-androideabi-g++ b/bin/arm-linux-androideabi-g++
new file mode 100755
index 0000000..4234417
--- /dev/null
+++ b/bin/arm-linux-androideabi-g++
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc b/bin/arm-linux-androideabi-gcc
new file mode 100755
index 0000000..c97bf9f
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-4.9 b/bin/arm-linux-androideabi-gcc-4.9
new file mode 120000
index 0000000..b8b9fad
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-4.9
@@ -0,0 +1 @@
+arm-linux-androideabi-gcc
\ No newline at end of file
diff --git a/bin/arm-linux-androideabi-gcc-4.9.x-google b/bin/arm-linux-androideabi-gcc-4.9.x-google
new file mode 100755
index 0000000..c97bf9f
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-4.9.x-google
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-ar b/bin/arm-linux-androideabi-gcc-ar
new file mode 100755
index 0000000..ba3bc03
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-ar
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-nm b/bin/arm-linux-androideabi-gcc-nm
new file mode 100755
index 0000000..a66af96
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-nm
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcc-ranlib b/bin/arm-linux-androideabi-gcc-ranlib
new file mode 100755
index 0000000..ad467cd
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcc-ranlib
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcov b/bin/arm-linux-androideabi-gcov
new file mode 100755
index 0000000..0dfd176
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcov
Binary files differ
diff --git a/bin/arm-linux-androideabi-gcov-tool b/bin/arm-linux-androideabi-gcov-tool
new file mode 100755
index 0000000..862a03d
--- /dev/null
+++ b/bin/arm-linux-androideabi-gcov-tool
Binary files differ
diff --git a/bin/arm-linux-androideabi-gdb b/bin/arm-linux-androideabi-gdb
new file mode 100755
index 0000000..451e0f8
--- /dev/null
+++ b/bin/arm-linux-androideabi-gdb
Binary files differ
diff --git a/bin/arm-linux-androideabi-gprof b/bin/arm-linux-androideabi-gprof
new file mode 100755
index 0000000..71b04d0
--- /dev/null
+++ b/bin/arm-linux-androideabi-gprof
Binary files differ
diff --git a/bin/arm-linux-androideabi-ld b/bin/arm-linux-androideabi-ld
new file mode 120000
index 0000000..4194d24
--- /dev/null
+++ b/bin/arm-linux-androideabi-ld
@@ -0,0 +1 @@
+arm-linux-androideabi-ld.gold
\ No newline at end of file
diff --git a/bin/arm-linux-androideabi-ld.bfd b/bin/arm-linux-androideabi-ld.bfd
new file mode 100755
index 0000000..9b0ca49
--- /dev/null
+++ b/bin/arm-linux-androideabi-ld.bfd
Binary files differ
diff --git a/bin/arm-linux-androideabi-ld.gold b/bin/arm-linux-androideabi-ld.gold
new file mode 100755
index 0000000..b475f1f
--- /dev/null
+++ b/bin/arm-linux-androideabi-ld.gold
Binary files differ
diff --git a/bin/arm-linux-androideabi-nm b/bin/arm-linux-androideabi-nm
new file mode 100755
index 0000000..74efd8e
--- /dev/null
+++ b/bin/arm-linux-androideabi-nm
Binary files differ
diff --git a/bin/arm-linux-androideabi-objcopy b/bin/arm-linux-androideabi-objcopy
new file mode 100755
index 0000000..7ceec7e
--- /dev/null
+++ b/bin/arm-linux-androideabi-objcopy
Binary files differ
diff --git a/bin/arm-linux-androideabi-objdump b/bin/arm-linux-androideabi-objdump
new file mode 100755
index 0000000..9ebbbec
--- /dev/null
+++ b/bin/arm-linux-androideabi-objdump
Binary files differ
diff --git a/bin/arm-linux-androideabi-ranlib b/bin/arm-linux-androideabi-ranlib
new file mode 100755
index 0000000..e6ad4cc
--- /dev/null
+++ b/bin/arm-linux-androideabi-ranlib
Binary files differ
diff --git a/bin/arm-linux-androideabi-readelf b/bin/arm-linux-androideabi-readelf
new file mode 100755
index 0000000..968f52a
--- /dev/null
+++ b/bin/arm-linux-androideabi-readelf
Binary files differ
diff --git a/bin/arm-linux-androideabi-size b/bin/arm-linux-androideabi-size
new file mode 100755
index 0000000..2eddf00
--- /dev/null
+++ b/bin/arm-linux-androideabi-size
Binary files differ
diff --git a/bin/arm-linux-androideabi-strings b/bin/arm-linux-androideabi-strings
new file mode 100755
index 0000000..7be47c6
--- /dev/null
+++ b/bin/arm-linux-androideabi-strings
Binary files differ
diff --git a/bin/arm-linux-androideabi-strip b/bin/arm-linux-androideabi-strip
new file mode 100755
index 0000000..cbb9fea
--- /dev/null
+++ b/bin/arm-linux-androideabi-strip
Binary files differ
diff --git a/include/gdb/jit-reader.h b/include/gdb/jit-reader.h
new file mode 100644
index 0000000..7cff81a
--- /dev/null
+++ b/include/gdb/jit-reader.h
@@ -0,0 +1,346 @@
+/* JIT declarations for GDB, the GNU Debugger.
+
+ Copyright (C) 2011-2013 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef GDB_JIT_READER_H
+#define GDB_JIT_READER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Versioning information. See gdb_reader_funcs. */
+
+#define GDB_READER_INTERFACE_VERSION 1
+
+/* Readers must be released under a GPL compatible license. To
+ declare that the reader is indeed released under a GPL compatible
+ license, invoke the macro GDB_DECLARE_GPL_COMPATIBLE in a source
+ file. */
+
+#ifdef __cplusplus
+#define GDB_DECLARE_GPL_COMPATIBLE_READER \
+ extern "C" { \
+ extern int plugin_is_GPL_compatible (void); \
+ extern int plugin_is_GPL_compatible (void) \
+ { \
+ return 0; \
+ } \
+ }
+
+#else
+
+#define GDB_DECLARE_GPL_COMPATIBLE_READER \
+ extern int plugin_is_GPL_compatible (void); \
+ extern int plugin_is_GPL_compatible (void) \
+ { \
+ return 0; \
+ }
+
+#endif
+
+/* Represents an address on the target system. */
+
+typedef unsigned long GDB_CORE_ADDR;
+
+/* Return status codes. */
+
+enum gdb_status {
+ GDB_FAIL = 0,
+ GDB_SUCCESS = 1
+};
+
+struct gdb_object;
+struct gdb_symtab;
+struct gdb_block;
+struct gdb_symbol_callbacks;
+
+/* An array of these are used to represent a map from code addresses to line
+ numbers in the source file. */
+
+struct gdb_line_mapping
+{
+ int line;
+ GDB_CORE_ADDR pc;
+};
+
+/* Create a new GDB code object. Each code object can have one or
+ more symbol tables, each representing a compiled source file. */
+
+typedef struct gdb_object *(gdb_object_open) (struct gdb_symbol_callbacks *cb);
+
+/* The callback used to create a new symbol table. CB is the
+ gdb_symbol_callbacks which the structure is part of. FILE_NAME is
+ an (optionally NULL) file name to associate with this new symbol
+ table.
+
+ Returns a new instance of gdb_symtab that can later be passed to
+ gdb_block_new, gdb_symtab_add_line_mapping and gdb_symtab_close. */
+
+typedef struct gdb_symtab *(gdb_symtab_open) (struct gdb_symbol_callbacks *cb,
+ struct gdb_object *obj,
+ const char *file_name);
+
+/* Creates a new block in a given symbol table. A symbol table is a
+ forest of blocks, each block representing a code address range and
+ a corresponding (optionally NULL) NAME. In case the block
+ corresponds to a function, the NAME passed should be the name of
+ the function.
+
+ If the new block to be created is a child of (i.e. is nested in)
+ another block, the parent block can be passed in PARENT. SYMTAB is
+ the symbol table the new block is to belong in. BEGIN, END is the
+ code address range the block corresponds to.
+
+ Returns a new instance of gdb_block, which, as of now, has no use.
+ Note that the gdb_block returned must not be freed by the
+ caller. */
+
+typedef struct gdb_block *(gdb_block_open) (struct gdb_symbol_callbacks *cb,
+ struct gdb_symtab *symtab,
+ struct gdb_block *parent,
+ GDB_CORE_ADDR begin,
+ GDB_CORE_ADDR end,
+ const char *name);
+
+/* Adds a PC to line number mapping for the symbol table SYMTAB.
+ NLINES is the number of elements in LINES, each element
+ corresponding to one (PC, line) pair. */
+
+typedef void (gdb_symtab_add_line_mapping) (struct gdb_symbol_callbacks *cb,
+ struct gdb_symtab *symtab,
+ int nlines,
+ struct gdb_line_mapping *lines);
+
+/* Close the symtab SYMTAB. This signals to GDB that no more blocks
+ will be opened on this symtab. */
+
+typedef void (gdb_symtab_close) (struct gdb_symbol_callbacks *cb,
+ struct gdb_symtab *symtab);
+
+
+/* Closes the gdb_object OBJ and adds the emitted information into
+ GDB's internal structures. Once this is done, the debug
+ information will be picked up and used; this will usually be the
+ last operation in gdb_read_debug_info. */
+
+typedef void (gdb_object_close) (struct gdb_symbol_callbacks *cb,
+ struct gdb_object *obj);
+
+/* Reads LEN bytes from TARGET_MEM in the target's virtual address
+ space into GDB_BUF.
+
+ Returns GDB_FAIL on failure, and GDB_SUCCESS on success. */
+
+typedef enum gdb_status (gdb_target_read) (GDB_CORE_ADDR target_mem,
+ void *gdb_buf, int len);
+
+/* The list of callbacks that are passed to read. These callbacks are
+ to be used to construct the symbol table. The functions have been
+ described above. */
+
+struct gdb_symbol_callbacks
+{
+ gdb_object_open *object_open;
+ gdb_symtab_open *symtab_open;
+ gdb_block_open *block_open;
+ gdb_symtab_close *symtab_close;
+ gdb_object_close *object_close;
+
+ gdb_symtab_add_line_mapping *line_mapping_add;
+ gdb_target_read *target_read;
+
+ /* For internal use by GDB. */
+ void *priv_data;
+};
+
+/* Forward declaration. */
+
+struct gdb_reg_value;
+
+/* A function of this type is used to free a gdb_reg_value. See the
+ comment on `free' in struct gdb_reg_value. */
+
+typedef void (gdb_reg_value_free) (struct gdb_reg_value *);
+
+/* Denotes the value of a register. */
+
+struct gdb_reg_value
+{
+ /* The size of the register in bytes. The reader need not set this
+ field. This will be set for (defined) register values being read
+ from GDB using reg_get. */
+ int size;
+
+ /* Set to non-zero if the value for the register is known. The
+ registers for which the reader does not call reg_set are also
+ assumed to be undefined */
+ int defined;
+
+ /* Since gdb_reg_value is a variable sized structure, it will
+ usually be allocated on the heap. This function is expected to
+ contain the corresponding "free" function.
+
+ When a pointer to gdb_reg_value is being sent from GDB to the
+ reader (via gdb_unwind_reg_get), the reader is expected to call
+ this function (with the same gdb_reg_value as argument) once it
+ is done with the value.
+
+ When the reader sends a gdb_reg_value to GDB (via
+ gdb_unwind_reg_set), it is expected to set this field to point to
+ an appropriate cleanup routine (or to NULL if no cleanup is
+ required). */
+ gdb_reg_value_free *free;
+
+ /* The value of the register. */
+ unsigned char value[1];
+};
+
+/* get_frame_id in gdb_reader_funcs is to return a gdb_frame_id
+ corresponding to the current frame. The registers corresponding to
+ the current frame can be read using reg_get. Calling get_frame_id
+ on a particular frame should return the same gdb_frame_id
+ throughout its lifetime (i.e. till before it gets unwound). One
+ way to do this is by having the CODE_ADDRESS point to the
+ function's first instruction and STACK_ADDRESS point to the value
+ of the stack pointer when entering the function. */
+
+struct gdb_frame_id
+{
+ GDB_CORE_ADDR code_address;
+ GDB_CORE_ADDR stack_address;
+};
+
+/* Forward declaration. */
+
+struct gdb_unwind_callbacks;
+
+/* Returns the value of a particular register in the current frame.
+ The current frame is the frame that needs to be unwound into the
+ outer (earlier) frame.
+
+ CB is the struct gdb_unwind_callbacks * the callback belongs to.
+ REGNUM is the DWARF register number of the register that needs to
+ be unwound.
+
+ Returns the gdb_reg_value corresponding to the register requested.
+ In case the value of the register has been optimized away or
+ otherwise unavailable, the defined flag in the returned
+ gdb_reg_value will be zero. */
+
+typedef struct gdb_reg_value *(gdb_unwind_reg_get)
+ (struct gdb_unwind_callbacks *cb, int regnum);
+
+/* Sets the previous value of a particular register. REGNUM is the
+ (DWARF) register number whose value is to be set. VAL is the value
+ the register is to be set to.
+
+ VAL is *not* copied, so the memory allocated to it cannot be
+ reused. Once GDB no longer needs the value, it is deallocated
+ using the FREE function (see gdb_reg_value).
+
+ A register can also be "set" to an undefined value by setting the
+ defined in VAL to zero. */
+
+typedef void (gdb_unwind_reg_set) (struct gdb_unwind_callbacks *cb, int regnum,
+ struct gdb_reg_value *val);
+
+/* This struct is passed to unwind in gdb_reader_funcs, and is to be
+ used to unwind the current frame (current being the frame whose
+ registers can be read using reg_get) into the earlier frame. The
+ functions have been described above. */
+
+struct gdb_unwind_callbacks
+{
+ gdb_unwind_reg_get *reg_get;
+ gdb_unwind_reg_set *reg_set;
+ gdb_target_read *target_read;
+
+ /* For internal use by GDB. */
+ void *priv_data;
+};
+
+/* Forward declaration. */
+
+struct gdb_reader_funcs;
+
+/* Parse the debug info off a block of memory, pointed to by MEMORY
+ (already copied to GDB's address space) and MEMORY_SZ bytes long.
+ The implementation has to use the functions in CB to actually emit
+ the parsed data into GDB. SELF is the same structure returned by
+ gdb_init_reader.
+
+ Return GDB_FAIL on failure and GDB_SUCCESS on success. */
+
+typedef enum gdb_status (gdb_read_debug_info) (struct gdb_reader_funcs *self,
+ struct gdb_symbol_callbacks *cb,
+ void *memory, long memory_sz);
+
+/* Unwind the current frame, CB is the set of unwind callbacks that
+ are to be used to do this.
+
+ Return GDB_FAIL on failure and GDB_SUCCESS on success. */
+
+typedef enum gdb_status (gdb_unwind_frame) (struct gdb_reader_funcs *self,
+ struct gdb_unwind_callbacks *cb);
+
+/* Return the frame ID corresponding to the current frame, using C to
+ read the current register values. See the comment on struct
+ gdb_frame_id. */
+
+typedef struct gdb_frame_id (gdb_get_frame_id) (struct gdb_reader_funcs *self,
+ struct gdb_unwind_callbacks *c);
+
+/* Called when a reader is being unloaded. This function should also
+ free SELF, if required. */
+
+typedef void (gdb_destroy_reader) (struct gdb_reader_funcs *self);
+
+/* Called when the reader is loaded. Must either return a properly
+ populated gdb_reader_funcs or NULL. The memory allocated for the
+ gdb_reader_funcs is to be managed by the reader itself (i.e. if it
+ is allocated from the heap, it must also be freed in
+ gdb_destroy_reader). */
+
+extern struct gdb_reader_funcs *gdb_init_reader (void);
+
+/* Pointer to the functions which implement the reader's
+ functionality. The individual functions have been documented
+ above.
+
+ None of the fields are optional. */
+
+struct gdb_reader_funcs
+{
+ /* Must be set to GDB_READER_INTERFACE_VERSION. */
+ int reader_version;
+
+ /* For use by the reader. */
+ void *priv_data;
+
+ gdb_read_debug_info *read;
+ gdb_unwind_frame *unwind;
+ gdb_get_frame_id *get_frame_id;
+ gdb_destroy_reader *destroy;
+};
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
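The header above is the whole contract for a GDB JIT debug-info reader: a shared object that declares GPL compatibility, defines gdb_init_reader, and fills in a gdb_reader_funcs with its callbacks. The skeleton below is only a sketch of that contract (the my_* names are invented for illustration, and the parsing and unwinding bodies are deliberately empty):

#include "gdb/jit-reader.h"
#include <stdlib.h>

GDB_DECLARE_GPL_COMPATIBLE_READER

/* Parse the JIT's custom debug blob and describe it to GDB via CB
   (object_open, symtab_open, block_open, ..., object_close).  */
static enum gdb_status
my_read (struct gdb_reader_funcs *self, struct gdb_symbol_callbacks *cb,
         void *memory, long memory_sz)
{
  return GDB_SUCCESS;
}

/* Unwind one JIT frame using cb->reg_get / cb->reg_set / cb->target_read.
   Returning GDB_FAIL simply declines to unwind.  */
static enum gdb_status
my_unwind (struct gdb_reader_funcs *self, struct gdb_unwind_callbacks *cb)
{
  return GDB_FAIL;
}

static struct gdb_frame_id
my_get_frame_id (struct gdb_reader_funcs *self, struct gdb_unwind_callbacks *c)
{
  struct gdb_frame_id id = { 0, 0 };
  return id;
}

static void
my_destroy (struct gdb_reader_funcs *self)
{
  free (self);
}

struct gdb_reader_funcs *
gdb_init_reader (void)
{
  struct gdb_reader_funcs *funcs = malloc (sizeof (*funcs));
  if (!funcs)
    return NULL;
  funcs->reader_version = GDB_READER_INTERFACE_VERSION;
  funcs->priv_data = NULL;
  funcs->read = my_read;
  funcs->unwind = my_unwind;
  funcs->get_frame_id = my_get_frame_id;
  funcs->destroy = my_destroy;
  return funcs;
}

Built as a shared object, such a reader would be loaded into a GDB session with the jit-reader-load command.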
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbegin.o
new file mode 100644
index 0000000..3a122ac
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginS.o
new file mode 100644
index 0000000..285a463
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginT.o
new file mode 100644
index 0000000..3a122ac
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtend.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtend.o
new file mode 100644
index 0000000..a106689
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtendS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtendS.o
new file mode 100644
index 0000000..a106689
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbegin.o
new file mode 100644
index 0000000..0157718
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginS.o
new file mode 100644
index 0000000..6bbdcfb
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginT.o
new file mode 100644
index 0000000..0157718
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtend.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtend.o
new file mode 100644
index 0000000..f0d9ebb
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtendS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtendS.o
new file mode 100644
index 0000000..f0d9ebb
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcc.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcc.a
new file mode 100644
index 0000000..c430f1e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcov.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcov.a
new file mode 100644
index 0000000..3bf4e16
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcc.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcc.a
new file mode 100644
index 0000000..9f5b644
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcov.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcov.a
new file mode 100644
index 0000000..9f08a52
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbegin.o
new file mode 100644
index 0000000..7b721f3
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginS.o
new file mode 100644
index 0000000..a2edda6
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginT.o
new file mode 100644
index 0000000..7b721f3
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtend.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtend.o
new file mode 100644
index 0000000..7b9d5b4
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtendS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtendS.o
new file mode 100644
index 0000000..7b9d5b4
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbegin.o
new file mode 100644
index 0000000..85e43a2
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginS.o
new file mode 100644
index 0000000..c7e3fba
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginT.o
new file mode 100644
index 0000000..85e43a2
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtend.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtend.o
new file mode 100644
index 0000000..92f272a
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtendS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtendS.o
new file mode 100644
index 0000000..92f272a
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcc.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcc.a
new file mode 100644
index 0000000..1200890
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcov.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcov.a
new file mode 100644
index 0000000..9353146
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/hard/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcc.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcc.a
new file mode 100644
index 0000000..c897efc
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcov.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcov.a
new file mode 100644
index 0000000..55e7cea
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/armv7-a/thumb/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbegin.o
new file mode 100644
index 0000000..70e314f
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginS.o
new file mode 100644
index 0000000..adceac9
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginT.o
new file mode 100644
index 0000000..70e314f
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/crtend.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtend.o
new file mode 100644
index 0000000..a63b41b
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/crtendS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtendS.o
new file mode 100644
index 0000000..a63b41b
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.c b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.c
new file mode 100644
index 0000000..f226cbf
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.c
@@ -0,0 +1,1088 @@
+/* File format for coverage information
+ Copyright (C) 1996-2014 Free Software Foundation, Inc.
+ Contributed by Bob Manson <manson@cygnus.com>.
+ Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Routines declared in gcov-io.h. This file should be #included by
+ another source file, after having #included gcov-io.h. */
+
+#if !IN_GCOV
+static void gcov_write_block (unsigned);
+static gcov_unsigned_t *gcov_write_words (unsigned);
+#endif
+static const gcov_unsigned_t *gcov_read_words (unsigned);
+#if !IN_LIBGCOV
+static void gcov_allocate (unsigned);
+#endif
+
+/* Optimum number of gcov_unsigned_t's read from or written to disk. */
+#define GCOV_BLOCK_SIZE (1 << 10)
+
+GCOV_LINKAGE struct gcov_var
+{
+ FILE *file;
+ gcov_position_t start; /* Position of first byte of block */
+ unsigned offset; /* Read/write position within the block. */
+ unsigned length; /* Read limit in the block. */
+ unsigned overread; /* Number of words overread. */
+ int error; /* < 0 overflow, > 0 disk error. */
+ int mode; /* < 0 writing, > 0 reading */
+#if IN_LIBGCOV
+ /* Holds one block plus 4 bytes, thus all coverage reads & writes
+ fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
+ to and from the disk. libgcov never backtracks and only writes 4
+ or 8 byte objects. */
+ gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1];
+#else
+ int endian; /* Swap endianness. */
+ /* Holds a variable length block, as the compiler can write
+ strings and needs to backtrack. */
+ size_t alloc;
+ gcov_unsigned_t *buffer;
+#endif
+} gcov_var;
+
+/* Save the current position in the gcov file. */
+/* We need to expose this function when compiling for gcov-tool. */
+#ifndef IN_GCOV_TOOL
+static inline
+#endif
+gcov_position_t
+gcov_position (void)
+{
+ return gcov_var.start + gcov_var.offset;
+}
+
+/* Return nonzero if the error flag is set. */
+/* We need to expose this function when compiling for gcov-tool. */
+#ifndef IN_GCOV_TOOL
+static inline
+#endif
+int
+gcov_is_error (void)
+{
+ return gcov_var.file ? gcov_var.error : 1;
+}
+
+#if IN_LIBGCOV
+/* Move to beginning of file and initialize for writing. */
+GCOV_LINKAGE inline void
+gcov_rewrite (void)
+{
+ gcc_assert (gcov_var.mode > 0);
+ gcov_var.mode = -1;
+ gcov_var.start = 0;
+ gcov_var.offset = 0;
+ fseek (gcov_var.file, 0L, SEEK_SET);
+}
+#endif
+
+static inline gcov_unsigned_t from_file (gcov_unsigned_t value)
+{
+#if !IN_LIBGCOV
+ if (gcov_var.endian)
+ {
+ value = (value >> 16) | (value << 16);
+ value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
+ }
+#endif
+ return value;
+}
+
+/* Open a gcov file. NAME is the name of the file to open and MODE
+ indicates whether a new file should be created, or an existing file
+ opened. If MODE is >= 0 an existing file will be opened, if
+ possible, and if MODE is <= 0, a new file will be created. Use
+ MODE=0 to attempt to reopen an existing file and then fall back on
+ creating a new one. If MODE < 0, the file will be opened in
+ read-only mode. Otherwise it will be opened for modification.
+ Return zero on failure, >0 on opening an existing file and <0 on
+ creating a new one. */
+
+GCOV_LINKAGE int
+#if IN_LIBGCOV
+gcov_open (const char *name)
+#else
+gcov_open (const char *name, int mode)
+#endif
+{
+#if IN_LIBGCOV
+ const int mode = 0;
+#endif
+#if GCOV_LOCKED
+ struct flock s_flock;
+ int fd;
+
+ s_flock.l_whence = SEEK_SET;
+ s_flock.l_start = 0;
+ s_flock.l_len = 0; /* Until EOF. */
+ s_flock.l_pid = getpid ();
+#endif
+
+ gcc_assert (!gcov_var.file);
+ gcov_var.start = 0;
+ gcov_var.offset = gcov_var.length = 0;
+ gcov_var.overread = -1u;
+ gcov_var.error = 0;
+#if !IN_LIBGCOV
+ gcov_var.endian = 0;
+#endif
+#if GCOV_LOCKED
+ if (mode > 0)
+ {
+ /* Read-only mode - acquire a read-lock. */
+ s_flock.l_type = F_RDLCK;
+ /* pass mode (ignored) for compatibility */
+ fd = open (name, O_RDONLY, S_IRUSR | S_IWUSR);
+ }
+ else if (mode < 0)
+ {
+ /* Write mode - acquire a write-lock. */
+ s_flock.l_type = F_WRLCK;
+ fd = open (name, O_RDWR | O_CREAT | O_TRUNC, 0666);
+ }
+ else /* mode == 0 */
+ {
+ /* Read-Write mode - acquire a write-lock. */
+ s_flock.l_type = F_WRLCK;
+ fd = open (name, O_RDWR | O_CREAT, 0666);
+ }
+ if (fd < 0)
+ return 0;
+
+ while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR)
+ continue;
+
+ gcov_var.file = fdopen (fd, (mode > 0) ? "rb" : "r+b");
+
+ if (!gcov_var.file)
+ {
+ close (fd);
+ return 0;
+ }
+
+ if (mode > 0)
+ gcov_var.mode = 1;
+ else if (mode == 0)
+ {
+ struct stat st;
+
+ if (fstat (fd, &st) < 0)
+ {
+ fclose (gcov_var.file);
+ gcov_var.file = 0;
+ return 0;
+ }
+ if (st.st_size != 0)
+ gcov_var.mode = 1;
+ else
+ gcov_var.mode = mode * 2 + 1;
+ }
+ else
+ gcov_var.mode = mode * 2 + 1;
+#else
+ if (mode >= 0)
+ gcov_var.file = fopen (name, (mode > 0) ? "rb" : "r+b");
+
+ if (gcov_var.file)
+ gcov_var.mode = 1;
+ else if (mode <= 0)
+ {
+ gcov_var.file = fopen (name, "w+b");
+ if (gcov_var.file)
+ gcov_var.mode = mode * 2 + 1;
+ }
+ if (!gcov_var.file)
+ return 0;
+#endif
+
+ setbuf (gcov_var.file, (char *)0);
+
+ return 1;
+}
+
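+/* Usage sketch, added for illustration only (not in the original
+ gcov-io.c): a host tool compiled in the IN_GCOV configuration could
+ dump the header of a coverage file roughly like this, relying on
+ GCOV_DATA_MAGIC and the header layout declared in gcov-io.h; the
+ file name is hypothetical:
+
+ if (gcov_open ("foo.gcda", 1))   // mode 1: open existing, read-only
+ {
+ gcov_unsigned_t magic = gcov_read_unsigned ();
+ if (gcov_magic (magic, GCOV_DATA_MAGIC))
+ {
+ gcov_unsigned_t version = gcov_read_unsigned ();
+ gcov_unsigned_t stamp = gcov_read_unsigned ();
+ printf ("version %08x, stamp %08x\n", version, stamp);
+ }
+ gcov_close ();
+ }
+ */
+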
+/* Close the current gcov file. Flushes data to disk. Returns nonzero
+ on failure or error flag set. */
+
+GCOV_LINKAGE int
+gcov_close (void)
+{
+ if (gcov_var.file)
+ {
+#if !IN_GCOV
+ if (gcov_var.offset && gcov_var.mode < 0)
+ gcov_write_block (gcov_var.offset);
+#endif
+ fclose (gcov_var.file);
+ gcov_var.file = 0;
+ gcov_var.length = 0;
+ }
+#if !IN_LIBGCOV
+ free (gcov_var.buffer);
+ gcov_var.alloc = 0;
+ gcov_var.buffer = 0;
+#endif
+ gcov_var.mode = 0;
+ return gcov_var.error;
+}
+
+#if !IN_LIBGCOV
+/* Check if MAGIC is EXPECTED. Use it to determine endianness of the
+ file. Returns +1 for same endian, -1 for other endian and zero for
+ not EXPECTED. */
+
+GCOV_LINKAGE int
+gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected)
+{
+ if (magic == expected)
+ return 1;
+ magic = (magic >> 16) | (magic << 16);
+ magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff);
+ if (magic == expected)
+ {
+ gcov_var.endian = 1;
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+#if !IN_LIBGCOV
+static void
+gcov_allocate (unsigned length)
+{
+ size_t new_size = gcov_var.alloc;
+
+ if (!new_size)
+ new_size = GCOV_BLOCK_SIZE;
+ new_size += length;
+ new_size *= 2;
+
+ gcov_var.alloc = new_size;
+ gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
+}
+#endif
+
+#if !IN_GCOV
+/* Write out the current block, if needs be. */
+
+static void
+gcov_write_block (unsigned size)
+{
+ if (fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1)
+ gcov_var.error = 1;
+ gcov_var.start += size;
+ gcov_var.offset -= size;
+}
+
+/* Allocate space to write WORDS words to the gcov file. Return a
+ pointer to the reserved words. */
+
+static gcov_unsigned_t *
+gcov_write_words (unsigned words)
+{
+ gcov_unsigned_t *result;
+
+ gcc_assert (gcov_var.mode < 0);
+#if IN_LIBGCOV
+ if (gcov_var.offset >= GCOV_BLOCK_SIZE)
+ {
+ gcov_write_block (GCOV_BLOCK_SIZE);
+ if (gcov_var.offset)
+ {
+ gcc_assert (gcov_var.offset == 1);
+ memcpy (gcov_var.buffer, gcov_var.buffer + GCOV_BLOCK_SIZE, 4);
+ }
+ }
+#else
+ if (gcov_var.offset + words > gcov_var.alloc)
+ gcov_allocate (gcov_var.offset + words);
+#endif
+ result = &gcov_var.buffer[gcov_var.offset];
+ gcov_var.offset += words;
+
+ return result;
+}
+
+/* Write unsigned VALUE to coverage file. Sets error flag
+ appropriately. */
+
+GCOV_LINKAGE void
+gcov_write_unsigned (gcov_unsigned_t value)
+{
+ gcov_unsigned_t *buffer = gcov_write_words (1);
+
+ buffer[0] = value;
+}
+
+/* Write counter VALUE to coverage file. Sets error flag
+ appropriately. */
+
+#if IN_LIBGCOV
+GCOV_LINKAGE void
+gcov_write_counter (gcov_type value)
+{
+ gcov_unsigned_t *buffer = gcov_write_words (2);
+
+ buffer[0] = (gcov_unsigned_t) value;
+ if (sizeof (value) > sizeof (gcov_unsigned_t))
+ buffer[1] = (gcov_unsigned_t) (value >> 32);
+ else
+ buffer[1] = 0;
+}
+#endif /* IN_LIBGCOV */
+
+#if !IN_LIBGCOV
+/* Write STRING to coverage file. Sets error flag on file
+ error, overflow flag on overflow */
+
+GCOV_LINKAGE void
+gcov_write_string (const char *string)
+{
+ unsigned length = 0;
+ unsigned alloc = 0;
+ gcov_unsigned_t *buffer;
+
+ if (string)
+ {
+ length = strlen (string);
+ alloc = (length + 4) >> 2;
+ }
+
+ buffer = gcov_write_words (1 + alloc);
+
+ buffer[0] = alloc;
+ buffer[alloc] = 0;
+ memcpy (&buffer[1], string, length);
+}
+#endif
+
+#if !IN_LIBGCOV
+/* Write a tag TAG and reserve space for the record length. Return a
+ value to be used for gcov_write_length. */
+
+GCOV_LINKAGE gcov_position_t
+gcov_write_tag (gcov_unsigned_t tag)
+{
+ gcov_position_t result = gcov_var.start + gcov_var.offset;
+ gcov_unsigned_t *buffer = gcov_write_words (2);
+
+ buffer[0] = tag;
+ buffer[1] = 0;
+
+ return result;
+}
+
+/* Write a record length using POSITION, which was returned by
+ gcov_write_tag. The current file position is the end of the
+ record, and is restored before returning. */
+
+GCOV_LINKAGE void
+gcov_write_length (gcov_position_t position)
+{
+ unsigned offset;
+ gcov_unsigned_t length;
+ gcov_unsigned_t *buffer;
+
+ gcc_assert (gcov_var.mode < 0);
+ gcc_assert (position + 2 <= gcov_var.start + gcov_var.offset);
+ gcc_assert (position >= gcov_var.start);
+ offset = position - gcov_var.start;
+ length = gcov_var.offset - offset - 2;
+ buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset];
+ buffer[1] = length;
+ if (gcov_var.offset >= GCOV_BLOCK_SIZE)
+ gcov_write_block (gcov_var.offset);
+}
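+
+/* Pairing sketch, for illustration only (not in the original source):
+ a compiler-side caller typically brackets a record like
+
+ gcov_position_t pos = gcov_write_tag (GCOV_TAG_FUNCTION);
+ gcov_write_unsigned (ident);
+ gcov_write_unsigned (lineno_checksum);
+ gcov_write_unsigned (cfg_checksum);
+ gcov_write_length (pos);
+
+ where gcov_write_length back-patches the placeholder word written by
+ gcov_write_tag with the number of words emitted in between. The
+ payload shown is only an example. */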
+
+#else /* IN_LIBGCOV */
+
+/* Write a tag TAG and length LENGTH. */
+
+GCOV_LINKAGE void
+gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
+{
+ gcov_unsigned_t *buffer = gcov_write_words (2);
+
+ buffer[0] = tag;
+ buffer[1] = length;
+}
+
+/* Write a summary structure to the gcov file. */
+
+GCOV_LINKAGE void
+gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
+{
+ unsigned ix, h_ix, bv_ix, h_cnt = 0;
+ const struct gcov_ctr_summary *csum;
+ unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
+
+ /* Count number of non-zero histogram entries, and fill in a bit vector
+ of non-zero indices. The histogram is only currently computed for arc
+ counters. */
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ histo_bitvector[bv_ix] = 0;
+ csum = &summary->ctrs[GCOV_COUNTER_ARCS];
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ if (csum->histogram[h_ix].num_counters > 0)
+ {
+ histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
+ h_cnt++;
+ }
+ }
+ gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH (h_cnt));
+ gcov_write_unsigned (summary->checksum);
+ for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
+ {
+ gcov_write_unsigned (csum->num);
+ gcov_write_unsigned (csum->runs);
+ gcov_write_counter (csum->sum_all);
+ gcov_write_counter (csum->run_max);
+ gcov_write_counter (csum->sum_max);
+ if (ix != GCOV_COUNTER_ARCS)
+ {
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ gcov_write_unsigned (0);
+ continue;
+ }
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ gcov_write_unsigned (histo_bitvector[bv_ix]);
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ if (!csum->histogram[h_ix].num_counters)
+ continue;
+ gcov_write_unsigned (csum->histogram[h_ix].num_counters);
+ gcov_write_counter (csum->histogram[h_ix].min_value);
+ gcov_write_counter (csum->histogram[h_ix].cum_value);
+ }
+ }
+}
+#endif /* IN_LIBGCOV */
+
+#endif /*!IN_GCOV */
+
+/* Return a pointer from which WORDS words can be read from the gcov
+ file. Returns NULL on failure (read past EOF). */
+
+static const gcov_unsigned_t *
+gcov_read_words (unsigned words)
+{
+ const gcov_unsigned_t *result;
+ unsigned excess = gcov_var.length - gcov_var.offset;
+
+ gcc_assert (gcov_var.mode > 0);
+ if (excess < words)
+ {
+ gcov_var.start += gcov_var.offset;
+#if IN_LIBGCOV
+ if (excess)
+ {
+ gcc_assert (excess == 1);
+ memcpy (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, 4);
+ }
+#else
+ memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, excess * 4);
+#endif
+ gcov_var.offset = 0;
+ gcov_var.length = excess;
+#if IN_LIBGCOV
+ gcc_assert (!gcov_var.length || gcov_var.length == 1);
+ excess = GCOV_BLOCK_SIZE;
+#else
+ if (gcov_var.length + words > gcov_var.alloc)
+ gcov_allocate (gcov_var.length + words);
+ excess = gcov_var.alloc - gcov_var.length;
+#endif
+ excess = fread (gcov_var.buffer + gcov_var.length,
+ 1, excess << 2, gcov_var.file) >> 2;
+ gcov_var.length += excess;
+ if (gcov_var.length < words)
+ {
+ gcov_var.overread += words - gcov_var.length;
+ gcov_var.length = 0;
+ return 0;
+ }
+ }
+ result = &gcov_var.buffer[gcov_var.offset];
+ gcov_var.offset += words;
+ return result;
+}
+
+/* Read unsigned value from a coverage file. Sets error flag on file
+ error, overflow flag on overflow */
+
+GCOV_LINKAGE gcov_unsigned_t
+gcov_read_unsigned (void)
+{
+ gcov_unsigned_t value;
+ const gcov_unsigned_t *buffer = gcov_read_words (1);
+
+ if (!buffer)
+ return 0;
+ value = from_file (buffer[0]);
+ return value;
+}
+
+/* Read counter value from a coverage file. Sets error flag on file
+ error, overflow flag on overflow */
+
+GCOV_LINKAGE gcov_type
+gcov_read_counter (void)
+{
+ gcov_type value;
+ const gcov_unsigned_t *buffer = gcov_read_words (2);
+
+ if (!buffer)
+ return 0;
+ value = from_file (buffer[0]);
+ if (sizeof (value) > sizeof (gcov_unsigned_t))
+ value |= ((gcov_type) from_file (buffer[1])) << 32;
+ else if (buffer[1])
+ gcov_var.error = -1;
+
+ return value;
+}
+
+/* We need to expose the below function when compiling for gcov-tool. */
+
+#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
+/* Read string from coverage file. Returns a pointer to a static
+ buffer, or NULL on empty string. You must copy the string before
+ calling another gcov function. */
+
+GCOV_LINKAGE const char *
+gcov_read_string (void)
+{
+ unsigned length = gcov_read_unsigned ();
+
+ if (!length)
+ return 0;
+
+ return (const char *) gcov_read_words (length);
+}
+#endif
+
+GCOV_LINKAGE void
+gcov_read_summary (struct gcov_summary *summary)
+{
+ unsigned ix, h_ix, bv_ix, h_cnt = 0;
+ struct gcov_ctr_summary *csum;
+ unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
+ unsigned cur_bitvector;
+
+ summary->checksum = gcov_read_unsigned ();
+ for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
+ {
+ csum->num = gcov_read_unsigned ();
+ csum->runs = gcov_read_unsigned ();
+ csum->sum_all = gcov_read_counter ();
+ csum->run_max = gcov_read_counter ();
+ csum->sum_max = gcov_read_counter ();
+ memset (csum->histogram, 0,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+ for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
+ {
+ histo_bitvector[bv_ix] = gcov_read_unsigned ();
+#if IN_LIBGCOV
+ /* When building libgcov we don't include system.h, which includes
+ hwint.h (where popcount_hwi is declared). However, libgcov.a
+ is built by the bootstrapped compiler and therefore the builtins
+ are always available. */
+ h_cnt += __builtin_popcount (histo_bitvector[bv_ix]);
+#else
+ h_cnt += popcount_hwi (histo_bitvector[bv_ix]);
+#endif
+ }
+ bv_ix = 0;
+ h_ix = 0;
+ cur_bitvector = 0;
+ while (h_cnt--)
+ {
+ /* Find the index corresponding to the next entry we will read in.
+ First find the next non-zero bitvector and re-initialize
+ the histogram index accordingly, then right shift and increment
+ the index until we find a set bit. */
+ while (!cur_bitvector)
+ {
+ h_ix = bv_ix * 32;
+ gcc_assert (bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE);
+ cur_bitvector = histo_bitvector[bv_ix++];
+ }
+ while (!(cur_bitvector & 0x1))
+ {
+ h_ix++;
+ cur_bitvector >>= 1;
+ }
+ gcc_assert (h_ix < GCOV_HISTOGRAM_SIZE);
+
+ csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
+ csum->histogram[h_ix].min_value = gcov_read_counter ();
+ csum->histogram[h_ix].cum_value = gcov_read_counter ();
+ /* Shift off the index we are done with and increment to the
+ corresponding next histogram entry. */
+ cur_bitvector >>= 1;
+ h_ix++;
+ }
+ }
+}
+
+#if (!IN_LIBGCOV && IN_GCOV != 1) || defined (IN_GCOV_TOOL)
+/* Read LEN words (unsigned type) and construct MOD_INFO. */
+
+GCOV_LINKAGE void
+gcov_read_module_info (struct gcov_module_info *mod_info,
+ gcov_unsigned_t len)
+{
+ gcov_unsigned_t src_filename_len, filename_len, i, j, num_strings;
+ mod_info->ident = gcov_read_unsigned ();
+ mod_info->is_primary = gcov_read_unsigned ();
+ mod_info->flags = gcov_read_unsigned ();
+ mod_info->lang = gcov_read_unsigned ();
+ mod_info->ggc_memory = gcov_read_unsigned ();
+ mod_info->num_quote_paths = gcov_read_unsigned ();
+ mod_info->num_bracket_paths = gcov_read_unsigned ();
+ mod_info->num_system_paths = gcov_read_unsigned ();
+ mod_info->num_cpp_defines = gcov_read_unsigned ();
+ mod_info->num_cpp_includes = gcov_read_unsigned ();
+ mod_info->num_cl_args = gcov_read_unsigned ();
+ len -= 11;
+
+ filename_len = gcov_read_unsigned ();
+ mod_info->da_filename = (char *) xmalloc (filename_len *
+ sizeof (gcov_unsigned_t));
+ for (i = 0; i < filename_len; i++)
+ ((gcov_unsigned_t *) mod_info->da_filename)[i] = gcov_read_unsigned ();
+ len -= (filename_len + 1);
+
+ src_filename_len = gcov_read_unsigned ();
+ mod_info->source_filename = (char *) xmalloc (src_filename_len *
+ sizeof (gcov_unsigned_t));
+ for (i = 0; i < src_filename_len; i++)
+ ((gcov_unsigned_t *) mod_info->source_filename)[i] = gcov_read_unsigned ();
+ len -= (src_filename_len + 1);
+
+ num_strings = mod_info->num_quote_paths + mod_info->num_bracket_paths
+ + mod_info->num_system_paths
+ + mod_info->num_cpp_defines + mod_info->num_cpp_includes
+ + mod_info->num_cl_args;
+ for (j = 0; j < num_strings; j++)
+ {
+ gcov_unsigned_t string_len = gcov_read_unsigned ();
+ mod_info->string_array[j] =
+ (char *) xmalloc (string_len * sizeof (gcov_unsigned_t));
+ for (i = 0; i < string_len; i++)
+ ((gcov_unsigned_t *) mod_info->string_array[j])[i] =
+ gcov_read_unsigned ();
+ len -= (string_len + 1);
+ }
+ gcc_assert (!len);
+}
+#endif
+
+/* We need to expose the below function when compiling for gcov-tool. */
+
+#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
+/* Reset to a known position. BASE should have been obtained from
+ gcov_position, LENGTH should be a record length. */
+
+GCOV_LINKAGE void
+gcov_sync (gcov_position_t base, gcov_unsigned_t length)
+{
+ gcc_assert (gcov_var.mode > 0);
+ base += length;
+ if (base - gcov_var.start <= gcov_var.length)
+ gcov_var.offset = base - gcov_var.start;
+ else
+ {
+ gcov_var.offset = gcov_var.length = 0;
+ fseek (gcov_var.file, base << 2, SEEK_SET);
+ gcov_var.start = ftell (gcov_var.file) >> 2;
+ }
+}
+#endif
+
+#if IN_LIBGCOV
+/* Move to a given position in a gcov file. */
+
+GCOV_LINKAGE void
+gcov_seek (gcov_position_t base)
+{
+ gcc_assert (gcov_var.mode < 0);
+ if (gcov_var.offset)
+ gcov_write_block (gcov_var.offset);
+ fseek (gcov_var.file, base << 2, SEEK_SET);
+ gcov_var.start = ftell (gcov_var.file) >> 2;
+}
+
+/* Truncate the gcov file at the current position. */
+
+GCOV_LINKAGE void
+gcov_truncate (void)
+{
+ long offs;
+ int filenum;
+ gcc_assert (gcov_var.mode < 0);
+ if (gcov_var.offset)
+ gcov_write_block (gcov_var.offset);
+ offs = ftell (gcov_var.file);
+ filenum = fileno (gcov_var.file);
+ if (offs == -1 || filenum == -1 || ftruncate (filenum, offs))
+ gcov_var.error = 1;
+}
+#endif
+
+#if IN_GCOV > 0
+/* Return the modification time of the current gcov file. */
+
+GCOV_LINKAGE time_t
+gcov_time (void)
+{
+ struct stat status;
+
+ if (fstat (fileno (gcov_var.file), &status))
+ return 0;
+ else
+ return status.st_mtime;
+}
+#endif /* IN_GCOV */
+
+#if !IN_GCOV
+/* Determine the index into histogram for VALUE. */
+
+#if IN_LIBGCOV
+static unsigned
+#else
+GCOV_LINKAGE unsigned
+#endif
+gcov_histo_index (gcov_type value)
+{
+ gcov_type_unsigned v = (gcov_type_unsigned)value;
+ unsigned r = 0;
+ unsigned prev2bits = 0;
+
+ /* Find index into log2 scale histogram, where each of the log2
+ sized buckets is divided into 4 linear sub-buckets for better
+ focus in the higher buckets. */
+
+ /* Find the place of the most-significant bit set. */
+ if (v > 0)
+ {
+#if IN_LIBGCOV
+ /* When building libgcov we don't include system.h, which includes
+ hwint.h (where floor_log2 is declared). However, libgcov.a
+ is built by the bootstrapped compiler and therefore the builtins
+ are always available. */
+ r = sizeof (long long) * __CHAR_BIT__ - 1 - __builtin_clzll (v);
+#else
+ /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
+ that is either 32 or 64 bits, and gcov_type_unsigned may be 64 bits.
+ Need to check for the case where gcov_type_unsigned is 64 bits
+ and HOST_WIDE_INT is 32 bits and handle it specially. */
+#if HOST_BITS_PER_WIDEST_INT == HOST_BITS_PER_WIDE_INT
+ r = floor_log2 (v);
+#elif HOST_BITS_PER_WIDEST_INT == 2 * HOST_BITS_PER_WIDE_INT
+ HOST_WIDE_INT hwi_v = v >> HOST_BITS_PER_WIDE_INT;
+ if (hwi_v)
+ r = floor_log2 (hwi_v) + HOST_BITS_PER_WIDE_INT;
+ else
+ r = floor_log2 ((HOST_WIDE_INT)v);
+#else
+ gcc_unreachable ();
+#endif
+#endif
+ }
+
+ /* If at most the 2 least significant bits are set (value is
+ 0 - 3) then that value is our index into the lowest set of
+ four buckets. */
+ if (r < 2)
+ return (unsigned)value;
+
+ gcc_assert (r < 64);
+
+ /* Find the two next most significant bits to determine which
+ of the four linear sub-buckets to select. */
+ prev2bits = (v >> (r - 2)) & 0x3;
+ /* Finally, compose the final bucket index from the log2 index and
+ the next 2 bits. The minimum r value at this point is 2 since we
+ returned above if r was 2 or more, so the minimum bucket at this
+ point is 4. */
+ return (r - 1) * 4 + prev2bits;
+}
+
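+/* Worked example, added for illustration (not in the original file):
+ for value = 100 (binary 1100100) the most significant set bit is bit
+ 6, so r = 6; the next two bits are (100 >> 4) & 3 = 2; the bucket is
+ therefore (6 - 1) * 4 + 2 = 22. Values 0 through 3 map directly to
+ buckets 0 through 3. */
+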
+/* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
+ the same relative order in both histograms, and are matched up
+ and merged in reverse order. Each counter is assigned an equal portion of
+ its entry's original cumulative counter value when computing the
+ new merged cum_value. */
+
+static void gcov_histogram_merge (gcov_bucket_type *tgt_histo,
+ gcov_bucket_type *src_histo)
+{
+ int src_i, tgt_i, tmp_i = 0;
+ unsigned src_num, tgt_num, merge_num;
+ gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
+ gcov_type merge_min;
+ gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
+ int src_done = 0;
+
+ memset (tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+
+ /* Assume that the counters are in the same relative order in both
+ histograms. Walk the histograms from largest to smallest entry,
+ matching up and combining counters in order. */
+ src_num = 0;
+ src_cum = 0;
+ src_i = GCOV_HISTOGRAM_SIZE - 1;
+ for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
+ {
+ tgt_num = tgt_histo[tgt_i].num_counters;
+ tgt_cum = tgt_histo[tgt_i].cum_value;
+ /* Keep going until all of the target histogram's counters at this
+ position have been matched and merged with counters from the
+ source histogram. */
+ while (tgt_num > 0 && !src_done)
+ {
+ /* If this is either the first time through this loop or we just
+ exhausted the previous non-zero source histogram entry, look
+ for the next non-zero source histogram entry. */
+ if (!src_num)
+ {
+ /* Locate the next non-zero entry. */
+ while (src_i >= 0 && !src_histo[src_i].num_counters)
+ src_i--;
+ /* If source histogram has fewer counters, then just copy over the
+ remaining target counters and quit. */
+ if (src_i < 0)
+ {
+ tmp_histo[tgt_i].num_counters += tgt_num;
+ tmp_histo[tgt_i].cum_value += tgt_cum;
+ if (!tmp_histo[tgt_i].min_value ||
+ tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
+ tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
+ while (--tgt_i >= 0)
+ {
+ tmp_histo[tgt_i].num_counters
+ += tgt_histo[tgt_i].num_counters;
+ tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
+ if (!tmp_histo[tgt_i].min_value ||
+ tgt_histo[tgt_i].min_value
+ < tmp_histo[tgt_i].min_value)
+ tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
+ }
+
+ src_done = 1;
+ break;
+ }
+
+ src_num = src_histo[src_i].num_counters;
+ src_cum = src_histo[src_i].cum_value;
+ }
+
+ /* The number of counters to merge on this pass is the minimum
+ of the remaining counters from the current target and source
+ histogram entries. */
+ merge_num = tgt_num;
+ if (src_num < merge_num)
+ merge_num = src_num;
+
+ /* The merged min_value is the sum of the min_values from target
+ and source. */
+ merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;
+
+ /* Compute the portion of source and target entries' cum_value
+ that will be apportioned to the counters being merged.
+ The total remaining cum_value from each entry is divided
+ equally among the counters from that histogram entry if we
+ are not merging all of them. */
+ merge_src_cum = src_cum;
+ if (merge_num < src_num)
+ merge_src_cum = merge_num * src_cum / src_num;
+ merge_tgt_cum = tgt_cum;
+ if (merge_num < tgt_num)
+ merge_tgt_cum = merge_num * tgt_cum / tgt_num;
+ /* The merged cum_value is the sum of the source and target
+ components. */
+ merge_cum = merge_src_cum + merge_tgt_cum;
+
+ /* Update the remaining number of counters and cum_value left
+ to be merged from this source and target entry. */
+ src_cum -= merge_src_cum;
+ tgt_cum -= merge_tgt_cum;
+ src_num -= merge_num;
+ tgt_num -= merge_num;
+
+ /* The merged counters get placed in the new merged histogram
+ at the entry for the merged min_value. */
+ tmp_i = gcov_histo_index (merge_min);
+ gcc_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
+ tmp_histo[tmp_i].num_counters += merge_num;
+ tmp_histo[tmp_i].cum_value += merge_cum;
+ if (!tmp_histo[tmp_i].min_value ||
+ merge_min < tmp_histo[tmp_i].min_value)
+ tmp_histo[tmp_i].min_value = merge_min;
+
+ /* Ensure the search for the next non-zero src_histo entry starts
+ at the next smallest histogram bucket. */
+ if (!src_num)
+ src_i--;
+ }
+ }
+
+ gcc_assert (tgt_i < 0);
+
+ /* In the case where there were more counters in the source histogram,
+ accumulate the remaining unmerged cumulative counter values. Add
+ those to the smallest non-zero target histogram entry. Otherwise,
+ the total cumulative counter values in the histogram will be smaller
+ than the sum_all stored in the summary, which will complicate
+ computing the working set information from the histogram later on. */
+ if (src_num)
+ src_i--;
+ while (src_i >= 0)
+ {
+ src_cum += src_histo[src_i].cum_value;
+ src_i--;
+ }
+ /* At this point, tmp_i should be the smallest non-zero entry in the
+ tmp_histo. */
+ gcc_assert (tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
+ && tmp_histo[tmp_i].num_counters > 0);
+ tmp_histo[tmp_i].cum_value += src_cum;
+
+ /* Finally, copy the merged histogram into tgt_histo. */
+ memcpy (tgt_histo, tmp_histo,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+}
+#endif /* !IN_GCOV */
+
+/* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
+ (!IN_GCOV && !IN_LIBGCOV). */
+#if IN_GCOV <= 0 && !IN_LIBGCOV
+/* Compute the working set information from the counter histogram in
+ the profile summary. This is an array of information corresponding to a
+ range of percentages of the total execution count (sum_all), and includes
+ the number of counters required to cover that working set percentage and
+ the minimum counter value in that working set. */
+
+GCOV_LINKAGE void
+compute_working_sets (const struct gcov_ctr_summary *summary,
+ gcov_working_set_t *gcov_working_sets)
+{
+ gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
+ gcov_type ws_cum_hotness_incr;
+ gcov_type cum, tmp_cum;
+ const gcov_bucket_type *histo_bucket;
+ unsigned ws_ix, c_num, count;
+ int h_ix;
+
+ /* Compute the amount of sum_all that the cumulative hotness grows
+ by in each successive working set entry, which depends on the
+ number of working set entries. */
+ ws_cum_hotness_incr = summary->sum_all / NUM_GCOV_WORKING_SETS;
+
+ /* Next fill in an array of the cumulative hotness values corresponding
+ to each working set summary entry we are going to compute below.
+ Skip 0% statistics, which can be extrapolated from the
+ rest of the summary data. */
+ cum = ws_cum_hotness_incr;
+ for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
+ ws_ix++, cum += ws_cum_hotness_incr)
+ working_set_cum_values[ws_ix] = cum;
+ /* The last summary entry is reserved for (roughly) 99.9% of the
+ working set. Divide by 1024 so it becomes a shift, which gives
+ almost exactly 99.9%. */
+ working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
+ = summary->sum_all - summary->sum_all/1024;
+
+ /* Next, walk through the histogram in descending order of hotness
+ and compute the statistics for the working set summary array.
+ As histogram entries are accumulated, we check to see which
+ working set entries have had their expected cum_value reached
+ and fill them in, walking the working set entries in increasing
+ size of cum_value. */
+ ws_ix = 0; /* The current entry into the working set array. */
+ cum = 0; /* The current accumulated counter sum. */
+ count = 0; /* The current accumulated count of block counters. */
+ for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
+ h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
+ {
+ histo_bucket = &summary->histogram[h_ix];
+
+ /* If we haven't reached the required cumulative counter value for
+ the current working set percentage, simply accumulate this histogram
+ entry into the running sums and continue to the next histogram
+ entry. */
+ if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
+ {
+ cum += histo_bucket->cum_value;
+ count += histo_bucket->num_counters;
+ continue;
+ }
+
+ /* If adding the current histogram entry's cumulative counter value
+ causes us to exceed the current working set size, then estimate
+ how many of this histogram entry's counter values are required to
+ reach the working set size, and fill in working set entries
+ as we reach their expected cumulative value. */
+ for (c_num = 0, tmp_cum = cum;
+ c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
+ c_num++)
+ {
+ count++;
+ /* If we haven't reached the last histogram entry counter, add
+ in the minimum value again. This will underestimate the
+ cumulative sum so far, because many of the counter values in this
+ entry may have been larger than the minimum. We could add in the
+ average value every time, but that would require an expensive
+ divide operation. */
+ if (c_num + 1 < histo_bucket->num_counters)
+ tmp_cum += histo_bucket->min_value;
+ /* If we have reached the last histogram entry counter, then add
+ in the entire cumulative value. */
+ else
+ tmp_cum = cum + histo_bucket->cum_value;
+
+ /* Next walk through successive working set entries and fill in
+ the statistics for any whose size we have reached by accumulating
+ this histogram counter. */
+ while (ws_ix < NUM_GCOV_WORKING_SETS
+ && tmp_cum >= working_set_cum_values[ws_ix])
+ {
+ gcov_working_sets[ws_ix].num_counters = count;
+ gcov_working_sets[ws_ix].min_counter
+ = histo_bucket->min_value;
+ ws_ix++;
+ }
+ }
+ /* Finally, update the running cumulative value since we were
+ using a temporary above. */
+ cum += histo_bucket->cum_value;
+ }
+ gcc_assert (ws_ix == NUM_GCOV_WORKING_SETS);
+}
+#endif /* IN_GCOV <= 0 && !IN_LIBGCOV */
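
To make the thresholds concrete, here is a small hypothetical sketch (not part of the patch) of the cumulative-hotness target that compute_working_sets associates with each working-set entry, mirroring the loop above with the same NUM_GCOV_WORKING_SETS = 128 layout; the sum_all value is sample input:

```c
#include <stdio.h>

#define NUM_GCOV_WORKING_SETS 128

/* Cumulative counter value that working-set entry WS_IX must reach,
   given the program-wide counter total SUM_ALL.  */
static long long
working_set_target (long long sum_all, unsigned ws_ix)
{
  long long incr = sum_all / NUM_GCOV_WORKING_SETS;
  if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
    return sum_all - sum_all / 1024;      /* last entry: roughly 99.9% */
  return (long long) (ws_ix + 1) * incr;  /* otherwise (ws_ix+1)/128 of it */
}

int
main (void)
{
  long long sum_all = 1000000;
  printf ("entry 0   -> %lld\n", working_set_target (sum_all, 0));   /* 7812 */
  printf ("entry 63  -> %lld\n", working_set_target (sum_all, 63));  /* 499968 */
  printf ("entry 127 -> %lld\n", working_set_target (sum_all, 127)); /* 999024 */
  return 0;
}
```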
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.h
new file mode 100644
index 0000000..50ffa55
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-io.h
@@ -0,0 +1,489 @@
+/* File format for coverage information
+ Copyright (C) 1996-2014 Free Software Foundation, Inc.
+ Contributed by Bob Manson <manson@cygnus.com>.
+ Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Coverage information is held in two files. A notes file, which is
+ generated by the compiler, and a data file, which is generated by
+ the program under test. Both files use a similar structure. We do
+ not attempt to make these files backwards compatible with previous
+ versions, as you only need coverage information when developing a
+ program. We do hold version information, so that mismatches can be
+ detected, and we use a format that allows tools to skip information
+ they do not understand or are not interested in.
+
+ Numbers are recorded in the 32 bit unsigned binary form of the
+ endianness of the machine generating the file. 64 bit numbers are
+ stored as two 32 bit numbers, the low part first. Strings are
+ padded with 1 to 4 NUL bytes, to bring the length up to a multiple
+ of 4. The number of 4-byte words is stored, followed by the padded
+ string. Zero length and NULL strings are simply stored as a length
+ of zero (they have no trailing NUL or padding).
+
+ int32: byte3 byte2 byte1 byte0 | byte0 byte1 byte2 byte3
+ int64: int32:low int32:high
+ string: int32:0 | int32:length char* char:0 padding
+ padding: | char:0 | char:0 char:0 | char:0 char:0 char:0
+ item: int32 | int64 | string
+
+ The basic format of the files is
+
+ file : int32:magic int32:version int32:stamp record*
+
+ The magic ident is different for the notes and the data files. The
+ magic ident is used to determine the endianness of the file, when
+ reading. The version is the same for both files and is derived
+ from gcc's version number. The stamp value is used to synchronize
+ note and data files and to synchronize merging within a data
+ file. It need not be an absolute time stamp, merely a ticker that
+ increments fast enough and cycles slow enough to distinguish
+ different compile/run/compile cycles.
+
+ Although the ident and version are formally 32 bit numbers, they
+ are derived from 4 character ASCII strings. The version number
+ consists of the single character major version number, a two
+ character minor version number (leading zero for versions less than
+ 10), and a single character indicating the status of the release.
+ That will be 'e' experimental, 'p' prerelease and 'r' for release.
+ Because, by good fortune, these are in alphabetical order, string
+ collating can be used to compare version strings. Be aware that
+ the 'e' designation will (naturally) be unstable and might be
+ incompatible with itself. For gcc 3.4 experimental, it would be
+ '304e' (0x33303465). When the major version reaches 10, the
+ letters A-Z will be used. Assuming minor version increments every
+ 6 months, we have to make a major increment every 50 years.
+ Assuming major version increments every 5 years, we're OK for the
+ next 155 years -- good enough for me.
+
+ A record has a tag, length and variable amount of data.
+
+ record: header data
+ header: int32:tag int32:length
+ data: item*
+
+ Records are not nested, but there is a record hierarchy. Tag
+ numbers reflect this hierarchy. Tags are unique across note and
+ data files. Some record types have a varying amount of data. The
+ LENGTH is the number of 4-byte words that follow and is usually used to
+ determine how much data follows. The tag value is split into 4 8-bit
+ fields, one for each of four possible levels. The most significant
+ is allocated first. Unused levels are zero. Active levels are
+ odd-valued, so that the LSB of the level is one. A sub-level
+ incorporates the values of its superlevels. This formatting allows
+ you to determine the tag hierarchy, without understanding the tags
+ themselves, and is similar to the standard section numbering used
+ in technical documents. Level values [1..3f] are used for common
+ tags, values [41..9f] for the notes file and [a1..ff] for the data
+ file.
+
+ The notes file contains the following records
+ note: unit function-graph*
+ unit: header int32:checksum string:source
+ function-graph: announce_function basic_blocks {arcs | lines}*
+ announce_function: header int32:ident
+ int32:lineno_checksum int32:cfg_checksum
+ string:name string:source int32:lineno
+ basic_block: header int32:flags*
+ arcs: header int32:block_no arc*
+ arc: int32:dest_block int32:flags
+ lines: header int32:block_no line*
+ int32:0 string:NULL
+ line: int32:line_no | int32:0 string:filename
+
+ The BASIC_BLOCK record holds per-bb flags. The number of blocks
+ can be inferred from its data length. There is one ARCS record per
+ basic block. The number of arcs from a bb is implicit from the
+ data length. It enumerates the destination bb and per-arc flags.
+ There is one LINES record per basic block, it enumerates the source
+ lines which belong to that basic block. Source file names are
+ introduced by a line number of 0, following lines are from the new
+ source file. The initial source file for the function is NULL, but
+ the current source file should be remembered from one LINES record
+ to the next. The end of a block is indicated by an empty filename
+ - this does not reset the current source file. Note there is no
+ ordering of the ARCS and LINES records: they may be in any order,
+ interleaved in any manner. The current filename follows the order
+ the LINES records are stored in the file, *not* the ordering of the
+ blocks they are for.
+
+ The data file contains the following records.
+ data: {unit summary:object summary:program* function-data*}*
+ unit: header int32:checksum
+ function-data: announce_function present counts
+ announce_function: header int32:ident
+ int32:lineno_checksum int32:cfg_checksum
+ present: header int32:present
+ counts: header int64:count*
+ summary: int32:checksum {count-summary}GCOV_COUNTERS_SUMMABLE
+ count-summary: int32:num int32:runs int64:sum
+ int64:max int64:sum_max histogram
+ histogram: {int32:bitvector}8 histogram-buckets*
+ histogram-buckets: int32:num int64:min int64:sum
+
+ The ANNOUNCE_FUNCTION record is the same as that in the note file,
+ but without the source location. The COUNTS record gives the
+ counter values for instrumented features. The SUMMARY records
+ describe the whole program. The checksum is used for whole
+ program summaries, and
+ disambiguates different programs which include the same
+ instrumented object file. There may be several program summaries,
+ each with a unique checksum. The object summary's checksum is
+ zero. Note that the data file might contain information from
+ several runs concatenated, or the data might be merged.
+
+ This file is included by the compiler, the gcov tools, and the
+ runtime support library libgcov. IN_LIBGCOV and IN_GCOV are used to
+ distinguish which case is which. If IN_LIBGCOV is nonzero,
+ libgcov is being built. If IN_GCOV is nonzero, the gcov tools are
+ being built. Otherwise the compiler is being built. IN_GCOV may be
+ positive or negative. If positive, we are compiling a tool that
+ requires additional functions (see the code for knowledge of what
+ those functions are). */
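
As a worked illustration of the string encoding described above (a sketch only, not part of the patch; encode_string is a hypothetical helper): the length word counts 4-byte units, the string is padded with 1 to 4 NUL bytes, and empty or NULL strings are just a zero length word.

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Produce the length word and padded bytes for one string item.  */
static size_t
encode_string (const char *s, uint32_t *length_word, char *buf)
{
  size_t len = s ? strlen (s) : 0;
  size_t padded = len ? (len / 4 + 1) * 4 : 0;   /* room for 1-4 NULs */

  *length_word = (uint32_t) (padded / 4);
  if (padded)
    {
      memset (buf, 0, padded);
      memcpy (buf, s, len);
    }
  return padded;
}

int
main (void)
{
  char buf[64];
  uint32_t words;
  size_t bytes = encode_string ("main.c", &words, buf);
  printf ("length word %u, %zu padded bytes\n", words, bytes);  /* 2, 8 */
  return 0;
}
```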
+
+#ifndef GCC_GCOV_IO_H
+#define GCC_GCOV_IO_H
+
+#ifndef IN_LIBGCOV
+/* About the host */
+
+typedef unsigned gcov_unsigned_t;
+typedef unsigned gcov_position_t;
+
+#if LONG_LONG_TYPE_SIZE > 32
+#define GCOV_TYPE_ATOMIC_FETCH_ADD_FN __atomic_fetch_add_8
+#define GCOV_TYPE_ATOMIC_FETCH_ADD BUILT_IN_ATOMIC_FETCH_ADD_8
+#else
+#define GCOV_TYPE_ATOMIC_FETCH_ADD_FN __atomic_fetch_add_4
+#define GCOV_TYPE_ATOMIC_FETCH_ADD BUILT_IN_ATOMIC_FETCH_ADD_4
+#endif
+#define PROFILE_GEN_EDGE_ATOMIC (flag_profile_gen_atomic == 1 || \
+ flag_profile_gen_atomic == 3)
+#define PROFILE_GEN_VALUE_ATOMIC (flag_profile_gen_atomic == 2 || \
+ flag_profile_gen_atomic == 3)
+
+/* gcov_type is typedef'd elsewhere for the compiler */
+#if IN_GCOV
+#define GCOV_LINKAGE static
+typedef HOST_WIDEST_INT gcov_type;
+typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
+#if IN_GCOV > 0
+#include <sys/types.h>
+#endif
+
+#define FUNC_ID_WIDTH HOST_BITS_PER_WIDE_INT/2
+#define FUNC_ID_MASK ((1L << FUNC_ID_WIDTH) - 1)
+#define EXTRACT_MODULE_ID_FROM_GLOBAL_ID(gid) (unsigned)(((gid) >> FUNC_ID_WIDTH) & FUNC_ID_MASK)
+#define EXTRACT_FUNC_ID_FROM_GLOBAL_ID(gid) (unsigned)((gid) & FUNC_ID_MASK)
+#define FUNC_GLOBAL_ID(m,f) ((((HOST_WIDE_INT) (m)) << FUNC_ID_WIDTH) | (f))
+
+#else /*!IN_GCOV */
+#define GCOV_TYPE_SIZE (LONG_LONG_TYPE_SIZE > 32 ? 64 : 32)
+#endif
+
+#if defined (HOST_HAS_F_SETLKW)
+#define GCOV_LOCKED 1
+#else
+#define GCOV_LOCKED 0
+#endif
+
+#define ATTRIBUTE_HIDDEN
+
+#endif /* !IN_LIBGCOV */
+
+#ifndef GCOV_LINKAGE
+#define GCOV_LINKAGE extern
+#endif
+
+/* File suffixes. */
+#define GCOV_DATA_SUFFIX ".gcda"
+#define GCOV_NOTE_SUFFIX ".gcno"
+
+/* File magic. Must not be palindromes. */
+#define GCOV_DATA_MAGIC ((gcov_unsigned_t)0x67636461) /* "gcda" */
+#define GCOV_NOTE_MAGIC ((gcov_unsigned_t)0x67636e6f) /* "gcno" */
+
+/* gcov-iov.h is automatically generated by the makefile from
+ version.c, it looks like
+ #define GCOV_VERSION ((gcov_unsigned_t)0x89abcdef)
+*/
+#include "gcov-iov.h"
+
+/* Convert a magic or version number to a 4 character string. */
+#define GCOV_UNSIGNED2STRING(ARRAY,VALUE) \
+ ((ARRAY)[0] = (char)((VALUE) >> 24), \
+ (ARRAY)[1] = (char)((VALUE) >> 16), \
+ (ARRAY)[2] = (char)((VALUE) >> 8), \
+ (ARRAY)[3] = (char)((VALUE) >> 0))
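
For illustration (not part of the patch), expanding the macro above on the version word that gcov-iov.h defines further down in this patch, 0x3430392a, yields the 4-character string "409*":

```c
#include <stdio.h>

#define GCOV_UNSIGNED2STRING(ARRAY,VALUE) \
  ((ARRAY)[0] = (char)((VALUE) >> 24), \
   (ARRAY)[1] = (char)((VALUE) >> 16), \
   (ARRAY)[2] = (char)((VALUE) >> 8),  \
   (ARRAY)[3] = (char)((VALUE) >> 0))

int
main (void)
{
  char v[5] = { 0 };                     /* extra byte for NUL termination */
  GCOV_UNSIGNED2STRING (v, 0x3430392au); /* GCOV_VERSION for 4.9.x-google */
  printf ("%s\n", v);                    /* prints 409* */
  return 0;
}
```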
+
+/* The record tags. Values [1..3f] are for tags which may be in either
+ file. Values [41..9f] for those in the note file and [a1..ff] for
+ the data file. The tag value zero is used as an explicit end of
+ file marker -- it is not required to be present. */
+
+#define GCOV_TAG_FUNCTION ((gcov_unsigned_t)0x01000000)
+#define GCOV_TAG_FUNCTION_LENGTH (3)
+#define GCOV_TAG_BLOCKS ((gcov_unsigned_t)0x01410000)
+#define GCOV_TAG_BLOCKS_LENGTH(NUM) (NUM)
+#define GCOV_TAG_BLOCKS_NUM(LENGTH) (LENGTH)
+#define GCOV_TAG_ARCS ((gcov_unsigned_t)0x01430000)
+#define GCOV_TAG_ARCS_LENGTH(NUM) (1 + (NUM) * 2)
+#define GCOV_TAG_ARCS_NUM(LENGTH) (((LENGTH) - 1) / 2)
+#define GCOV_TAG_LINES ((gcov_unsigned_t)0x01450000)
+#define GCOV_TAG_COUNTER_BASE ((gcov_unsigned_t)0x01a10000)
+#define GCOV_TAG_COUNTER_LENGTH(NUM) ((NUM) * 2)
+#define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH) / 2)
+#define GCOV_TAG_OBJECT_SUMMARY ((gcov_unsigned_t)0xa1000000) /* Obsolete */
+#define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000)
+#define GCOV_TAG_SUMMARY_LENGTH(NUM) \
+ (1 + GCOV_COUNTERS_SUMMABLE * (10 + 3 * 2) + (NUM) * 5)
+#define GCOV_TAG_MODULE_INFO ((gcov_unsigned_t)0xab000000)
+#define GCOV_TAG_AFDO_FILE_NAMES ((gcov_unsigned_t)0xaa000000)
+#define GCOV_TAG_AFDO_FUNCTION ((gcov_unsigned_t)0xac000000)
+#define GCOV_TAG_AFDO_MODULE_GROUPING ((gcov_unsigned_t)0xae000000)
+#define GCOV_TAG_AFDO_WORKING_SET ((gcov_unsigned_t)0xaf000000)
+
+/* Counters that are collected. */
+#define DEF_GCOV_COUNTER(COUNTER, NAME, MERGE_FN) COUNTER,
+enum {
+#include "gcov-counter.def"
+GCOV_COUNTERS
+};
+#undef DEF_GCOV_COUNTER
+
+/* Counters which can be summarized. */
+#define GCOV_COUNTERS_SUMMABLE (GCOV_COUNTER_ARCS + 1)
+
+/* The first of counters used for value profiling. They must form a
+ consecutive interval and their order must match the order of
+ HIST_TYPEs in value-prof.h. */
+#define GCOV_FIRST_VALUE_COUNTER GCOV_COUNTERS_SUMMABLE
+
+/* The last of counters used for value profiling. */
+#define GCOV_LAST_VALUE_COUNTER (GCOV_COUNTERS - 2)
+
+/* Number of counters used for value profiling. */
+#define GCOV_N_VALUE_COUNTERS \
+ (GCOV_LAST_VALUE_COUNTER - GCOV_FIRST_VALUE_COUNTER + 1)
+
+#define GCOV_ICALL_TOPN_VAL 2 /* Track two hottest callees */
+#define GCOV_ICALL_TOPN_NCOUNTS 9 /* The number of counter entries per icall callsite */
+
+/* Convert a counter index to a tag. */
+#define GCOV_TAG_FOR_COUNTER(COUNT) \
+ (GCOV_TAG_COUNTER_BASE + ((gcov_unsigned_t)(COUNT) << 17))
+/* Convert a tag to a counter. */
+#define GCOV_COUNTER_FOR_TAG(TAG) \
+ ((unsigned)(((TAG) - GCOV_TAG_COUNTER_BASE) >> 17))
+/* Check whether a tag is a counter tag. */
+#define GCOV_TAG_IS_COUNTER(TAG) \
+ (!((TAG) & 0xFFFF) && GCOV_COUNTER_FOR_TAG (TAG) < GCOV_COUNTERS)
+
+/* The tag level mask has 1's in the position of the inner levels, &
+ the lsb of the current level, and zero on the current and outer
+ levels. */
+#define GCOV_TAG_MASK(TAG) (((TAG) - 1) ^ (TAG))
+
+/* Return nonzero if SUB is an immediate subtag of TAG. */
+#define GCOV_TAG_IS_SUBTAG(TAG,SUB) \
+ (GCOV_TAG_MASK (TAG) >> 8 == GCOV_TAG_MASK (SUB) \
+ && !(((SUB) ^ (TAG)) & ~GCOV_TAG_MASK (TAG)))
+
+/* Return nonzero if SUB is at a sublevel to TAG. */
+#define GCOV_TAG_IS_SUBLEVEL(TAG,SUB) \
+ (GCOV_TAG_MASK (TAG) > GCOV_TAG_MASK (SUB))
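
A compact illustration (not part of the patch) of how these tag helpers relate the record tags defined earlier in this header; the macro definitions are copied here only so the snippet is self-contained:

```c
#include <stdio.h>

typedef unsigned gcov_unsigned_t;

#define GCOV_TAG_FUNCTION     ((gcov_unsigned_t)0x01000000)
#define GCOV_TAG_ARCS         ((gcov_unsigned_t)0x01430000)
#define GCOV_TAG_COUNTER_BASE ((gcov_unsigned_t)0x01a10000)

#define GCOV_TAG_FOR_COUNTER(COUNT) \
  (GCOV_TAG_COUNTER_BASE + ((gcov_unsigned_t)(COUNT) << 17))
#define GCOV_COUNTER_FOR_TAG(TAG) \
  ((unsigned)(((TAG) - GCOV_TAG_COUNTER_BASE) >> 17))
#define GCOV_TAG_MASK(TAG) (((TAG) - 1) ^ (TAG))
#define GCOV_TAG_IS_SUBTAG(TAG,SUB) \
  (GCOV_TAG_MASK (TAG) >> 8 == GCOV_TAG_MASK (SUB) \
   && !(((SUB) ^ (TAG)) & ~GCOV_TAG_MASK (TAG)))

int
main (void)
{
  printf ("counter 1 tag:  %#x\n", GCOV_TAG_FOR_COUNTER (1));        /* 0x1a30000 */
  printf ("tag -> counter: %u\n",
          GCOV_COUNTER_FOR_TAG (GCOV_TAG_FOR_COUNTER (1)));          /* 1 */
  printf ("ARCS subtag of FUNCTION: %d\n",
          GCOV_TAG_IS_SUBTAG (GCOV_TAG_FUNCTION, GCOV_TAG_ARCS));    /* 1 */
  return 0;
}
```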
+
+/* Basic block flags. */
+#define GCOV_BLOCK_UNEXPECTED (1 << 1)
+
+/* Arc flags. */
+#define GCOV_ARC_ON_TREE (1 << 0)
+#define GCOV_ARC_FAKE (1 << 1)
+#define GCOV_ARC_FALLTHROUGH (1 << 2)
+
+/* Structured records. */
+
+/* Structure used for each bucket of the log2 histogram of counter values. */
+typedef struct
+{
+ /* Number of counters whose profile count falls within the bucket. */
+ gcov_unsigned_t num_counters;
+ /* Smallest profile count included in this bucket. */
+ gcov_type min_value;
+ /* Cumulative value of the profile counts in this bucket. */
+ gcov_type cum_value;
+} gcov_bucket_type;
+
+/* For a log2 scale histogram with each range split into 4
+ linear sub-ranges, there will be at most 64 (max gcov_type bit size) - 1 log2
+ ranges since the lowest 2 log2 values share the lowest 4 linear
+ sub-range (values 0 - 3). This is 252 total entries (63*4). */
+
+#define GCOV_HISTOGRAM_SIZE 252
+
+/* How many unsigned ints are required to hold a bit vector of non-zero
+ histogram entries when the histogram is written to the gcov file.
+ This is essentially a ceiling divide by 32 bits. */
+#define GCOV_HISTOGRAM_BITVECTOR_SIZE ((GCOV_HISTOGRAM_SIZE + 31) / 32)
+
+/* Cumulative counter data. */
+struct gcov_ctr_summary
+{
+ gcov_unsigned_t num; /* number of counters. */
+ gcov_unsigned_t runs; /* number of program runs */
+ gcov_type sum_all; /* sum of all counters accumulated. */
+ gcov_type run_max; /* maximum value on a single run. */
+ gcov_type sum_max; /* sum of individual run max values. */
+ gcov_bucket_type histogram[GCOV_HISTOGRAM_SIZE]; /* histogram of
+ counter values. */
+};
+
+/* Object & program summary record. */
+struct gcov_summary
+{
+ gcov_unsigned_t checksum; /* checksum of program */
+ struct gcov_ctr_summary ctrs[GCOV_COUNTERS_SUMMABLE];
+};
+
+#define GCOV_MODULE_UNKNOWN_LANG 0
+#define GCOV_MODULE_C_LANG 1
+#define GCOV_MODULE_CPP_LANG 2
+#define GCOV_MODULE_FORT_LANG 3
+
+#define GCOV_MODULE_ASM_STMTS (1 << 16)
+#define GCOV_MODULE_LANG_MASK 0xffff
+
+/* Source module info. The data structure is used in
+ both runtime and profile-use phase. Make sure to allocate
+ enough space for the variable length member. */
+struct gcov_module_info
+{
+ gcov_unsigned_t ident;
+ gcov_unsigned_t is_primary; /* this is overloaded to mean two things:
+ (1) means FDO/LIPO in instrumented binary.
+ (2) means IS_PRIMARY in persistent file or
+ memory copy used in profile-use. */
+ gcov_unsigned_t flags; /* bit 0: is_exported,
+ bit 1: need to include all the auxiliary
+ modules in use compilation. */
+ gcov_unsigned_t lang; /* lower 16 bits encode the language, and the upper
+ 16 bits encode other attributes, such as whether
+ any assembler is present in the source, etc. */
+ gcov_unsigned_t ggc_memory; /* memory needed for parsing in kb */
+ char *da_filename;
+ char *source_filename;
+ gcov_unsigned_t num_quote_paths;
+ gcov_unsigned_t num_bracket_paths;
+ gcov_unsigned_t num_system_paths;
+ gcov_unsigned_t num_cpp_defines;
+ gcov_unsigned_t num_cpp_includes;
+ gcov_unsigned_t num_cl_args;
+ char *string_array[1];
+};
+
+extern struct gcov_module_info **module_infos;
+extern unsigned primary_module_id;
+#define SET_MODULE_INCLUDE_ALL_AUX(modu) ((modu->flags |= 0x2))
+#define MODULE_INCLUDE_ALL_AUX_FLAG(modu) ((modu->flags & 0x2))
+#define SET_MODULE_EXPORTED(modu) ((modu->flags |= 0x1))
+#define MODULE_EXPORTED_FLAG(modu) ((modu->flags & 0x1))
+#define PRIMARY_MODULE_EXPORTED \
+ (MODULE_EXPORTED_FLAG (module_infos[0]) \
+ && !((module_infos[0]->lang & GCOV_MODULE_ASM_STMTS) \
+ && flag_ripa_disallow_asm_modules))
+
+#if !defined(inhibit_libc)
+
+/* Functions for reading and writing gcov files. In libgcov you can
+ open the file for reading then writing. Elsewhere you can open the
+ file either for reading or for writing. When reading a file you may
+ use the gcov_read_* functions, gcov_sync, gcov_position, &
+ gcov_error. When writing a file you may use the gcov_write
+ functions, gcov_seek & gcov_error. When a file is to be rewritten
+ you use the functions for reading, then gcov_rewrite then the
+ functions for writing. Your file may become corrupted if you break
+ these invariants. */
+
+#if !IN_LIBGCOV
+GCOV_LINKAGE int gcov_open (const char */*name*/, int /*direction*/);
+GCOV_LINKAGE int gcov_magic (gcov_unsigned_t, gcov_unsigned_t);
+#endif
+
+/* Available everywhere. */
+GCOV_LINKAGE int gcov_close (void) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE gcov_unsigned_t gcov_read_unsigned (void) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE gcov_type gcov_read_counter (void) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE void gcov_read_summary (struct gcov_summary *) ATTRIBUTE_HIDDEN;
+GCOV_LINKAGE const char *gcov_read_string (void);
+GCOV_LINKAGE void gcov_sync (gcov_position_t /*base*/,
+ gcov_unsigned_t /*length */);
+
+
+#if !IN_LIBGCOV && IN_GCOV != 1
+GCOV_LINKAGE void gcov_read_module_info (struct gcov_module_info *mod_info,
+ gcov_unsigned_t len) ATTRIBUTE_HIDDEN;
+#endif
+
+#if !IN_GCOV
+/* Available outside gcov */
+GCOV_LINKAGE void gcov_write_unsigned (gcov_unsigned_t) ATTRIBUTE_HIDDEN;
+#endif
+
+#if !IN_GCOV && !IN_LIBGCOV
+/* Available only in compiler */
+GCOV_LINKAGE unsigned gcov_histo_index (gcov_type value);
+GCOV_LINKAGE void gcov_write_string (const char *);
+GCOV_LINKAGE gcov_position_t gcov_write_tag (gcov_unsigned_t);
+GCOV_LINKAGE void gcov_write_length (gcov_position_t /*position*/);
+#endif
+
+#if IN_GCOV <= 0 && !IN_LIBGCOV
+/* Available in gcov-dump and the compiler. */
+
+/* Number of data points in the working set summary array. Using 128
+ provides information for at least every 1% increment of the total
+ profile size. The last entry is hardwired to 99.9% of the total. */
+#define NUM_GCOV_WORKING_SETS 128
+
+/* Working set size statistics for a given percentage of the entire
+ profile (sum_all from the counter summary). */
+typedef struct gcov_working_set_info
+{
+ /* Number of hot counters included in this working set. */
+ unsigned num_counters;
+ /* Smallest counter included in this working set. */
+ gcov_type min_counter;
+} gcov_working_set_t;
+
+GCOV_LINKAGE void compute_working_sets (const struct gcov_ctr_summary *summary,
+ gcov_working_set_t *gcov_working_sets);
+#endif
+
+#if IN_GCOV > 0
+/* Available in gcov */
+GCOV_LINKAGE time_t gcov_time (void);
+#endif
+
+#endif /* !inhibit_libc */
+
+#endif /* GCC_GCOV_IO_H */
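
As a practical sketch of the on-disk layout documented at the top of this header (illustration only, not part of the patch), the fixed header that both .gcno and .gcda files begin with can be decoded with plain stdio; words are written in the producing host's endianness, so this only decodes files written on a same-endian machine:

```c
#include <stdio.h>
#include <stdint.h>

int
main (int argc, char **argv)
{
  uint32_t hdr[3];  /* int32:magic int32:version int32:stamp */
  FILE *f;

  if (argc != 2 || !(f = fopen (argv[1], "rb")))
    return 1;
  if (fread (hdr, sizeof (uint32_t), 3, f) != 3)
    {
      fclose (f);
      return 1;
    }
  printf ("magic   %#010x (%s)\n", (unsigned) hdr[0],
          hdr[0] == 0x67636461u ? "gcda"
          : hdr[0] == 0x67636e6fu ? "gcno" : "unknown or other endian");
  printf ("version %#010x\n", (unsigned) hdr[1]);
  printf ("stamp   %#010x\n", (unsigned) hdr[2]);
  fclose (f);
  return 0;
}
```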
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-iov.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-iov.h
new file mode 100644
index 0000000..ec6b717
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/gcov-iov.h
@@ -0,0 +1,4 @@
+/* Generated automatically by the program `build/gcov-iov'
+ from `4.9.x-google (4 9) and prerelease (*)'. */
+
+#define GCOV_VERSION ((gcov_unsigned_t)0x3430392a) /* 409* */
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/libgcov-driver.c b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/libgcov-driver.c
new file mode 100644
index 0000000..dc8cf36
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/gcov-src/libgcov-driver.c
@@ -0,0 +1,1193 @@
+/* Routines required for instrumenting a program. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1989-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#include "libgcov.h"
+
+#if defined(inhibit_libc)
+/* If libc and its header files are not available, provide dummy functions. */
+
+#if defined(L_gcov)
+void __gcov_init (struct gcov_info *p __attribute__ ((unused))) {}
+#endif
+
+#else /* inhibit_libc */
+
+#include <string.h>
+#if GCOV_LOCKED
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/stat.h>
+#endif
+
+#ifdef L_gcov
+#include "gcov-io.c"
+
+#ifndef IN_GCOV_TOOL
+extern gcov_unsigned_t __gcov_sampling_period;
+extern gcov_unsigned_t __gcov_has_sampling;
+static int gcov_sampling_period_initialized = 0;
+#endif
+
+/* Unique identifier assigned to each module (object file). */
+static gcov_unsigned_t gcov_cur_module_id = 0;
+
+
+/* Dynamic call graph build and form module groups. */
+int __gcov_compute_module_groups (void) ATTRIBUTE_HIDDEN;
+void __gcov_finalize_dyn_callgraph (void) ATTRIBUTE_HIDDEN;
+
+/* The following functions can be called from outside of this file. */
+extern void gcov_clear (void) ATTRIBUTE_HIDDEN;
+extern void gcov_exit (void) ATTRIBUTE_HIDDEN;
+extern void set_gcov_dump_complete (void) ATTRIBUTE_HIDDEN;
+extern void reset_gcov_dump_complete (void) ATTRIBUTE_HIDDEN;
+extern int get_gcov_dump_complete (void) ATTRIBUTE_HIDDEN;
+extern void set_gcov_list (struct gcov_info *) ATTRIBUTE_HIDDEN;
+__attribute__((weak)) void __coverage_callback (gcov_type, int);
+
+#ifndef IN_GCOV_TOOL
+/* Create a strong reference to these symbols so that they are
+ unconditionally pulled into the instrumented binary, even when
+ the only reference is a weak reference. This is necessary because
+ we are using weak references to enable references from code that
+ may not be linked with libgcov. These are the only symbols that
+ should be accessed via link references from application code!
+
+ A subtlety of the linker is that it will only resolve weak references
+ defined within archive libraries when there is a strong reference to
+ something else defined within the same object file. Since these functions
+ are defined within their own object files, they would not automatically
+ get resolved. Since there are symbols within the main L_gcov
+ section that are strongly referenced during -fprofile-generate and
+ -ftest-coverage builds, these dummy symbols will always need to be
+ resolved. */
+void (*__gcov_dummy_ref1)(void) = &__gcov_reset;
+void (*__gcov_dummy_ref2)(void) = &__gcov_dump;
+extern char *__gcov_get_profile_prefix (void);
+char *(*__gcov_dummy_ref3)(void) = &__gcov_get_profile_prefix;
+extern void __gcov_set_sampling_period (unsigned int period);
+char *(*__gcov_dummy_ref4)(void) = &__gcov_set_sampling_period;
+extern unsigned int __gcov_sampling_enabled (void);
+char *(*__gcov_dummy_ref5)(void) = &__gcov_sampling_enabled;
+extern void __gcov_flush (void);
+char *(*__gcov_dummy_ref6)(void) = &__gcov_flush;
+extern unsigned int __gcov_profiling_for_test_coverage (void);
+char *(*__gcov_dummy_ref7)(void) = &__gcov_profiling_for_test_coverage;
+#endif
+
+/* Default callback function for profile instrumentation callback. */
+__attribute__((weak)) void
+__coverage_callback (gcov_type funcdef_no __attribute__ ((unused)),
+ int edge_no __attribute__ ((unused)))
+{
+ /* nothing */
+}
+
+struct gcov_fn_buffer
+{
+ struct gcov_fn_buffer *next;
+ unsigned fn_ix;
+ struct gcov_fn_info info;
+ /* note gcov_fn_info ends in a trailing array. */
+};
+
+struct gcov_summary_buffer
+{
+ struct gcov_summary_buffer *next;
+ struct gcov_summary summary;
+};
+
+/* Chain of per-object gcov structures. */
+extern struct gcov_info *__gcov_list;
+
+/* Set the head of gcov_list. */
+void
+set_gcov_list (struct gcov_info *head)
+{
+ __gcov_list = head;
+}
+
+/* Size of the longest file name. */
+/* We need to expose this static variable when compiling for gcov-tool. */
+#ifndef IN_GCOV_TOOL
+static
+#endif
+size_t gcov_max_filename = 0;
+
+/* Flag when the profile has already been dumped via __gcov_dump(). */
+static int gcov_dump_complete;
+
+/* A global function that gets the value of gcov_dump_complete. */
+
+int
+get_gcov_dump_complete (void)
+{
+ return gcov_dump_complete;
+}
+
+/* A global function that sets the value of gcov_dump_complete. Will
+ be used in __gcov_dump() in libgcov-interface.c. */
+
+void
+set_gcov_dump_complete (void)
+{
+ gcov_dump_complete = 1;
+}
+
+/* A global function that resets the value of gcov_dump_complete. Will
+ be used in __gcov_reset() in libgcov-interface.c. */
+
+void
+reset_gcov_dump_complete (void)
+{
+ gcov_dump_complete = 0;
+}
+
+/* A utility function for outputting errors. */
+static int gcov_error (const char *, ...);
+
+static struct gcov_fn_buffer *
+free_fn_data (const struct gcov_info *gi_ptr, struct gcov_fn_buffer *buffer,
+ unsigned limit)
+{
+ struct gcov_fn_buffer *next;
+ unsigned ix, n_ctr = 0;
+
+ if (!buffer)
+ return 0;
+ next = buffer->next;
+
+ for (ix = 0; ix != limit; ix++)
+ if (gi_ptr->merge[ix])
+ free (buffer->info.ctrs[n_ctr++].values);
+ free (buffer);
+ return next;
+}
+
+static struct gcov_fn_buffer **
+buffer_fn_data (const char *filename, const struct gcov_info *gi_ptr,
+ struct gcov_fn_buffer **end_ptr, unsigned fn_ix)
+{
+ unsigned n_ctrs = 0, ix = 0;
+ struct gcov_fn_buffer *fn_buffer;
+ unsigned len;
+
+ for (ix = GCOV_COUNTERS; ix--;)
+ if (gi_ptr->merge[ix])
+ n_ctrs++;
+
+ len = sizeof (*fn_buffer) + sizeof (fn_buffer->info.ctrs[0]) * n_ctrs;
+ fn_buffer = (struct gcov_fn_buffer *) xmalloc (len);
+
+ if (!fn_buffer)
+ goto fail;
+
+ fn_buffer->next = 0;
+ fn_buffer->fn_ix = fn_ix;
+ fn_buffer->info.ident = gcov_read_unsigned ();
+ fn_buffer->info.lineno_checksum = gcov_read_unsigned ();
+ fn_buffer->info.cfg_checksum = gcov_read_unsigned ();
+
+ for (n_ctrs = ix = 0; ix != GCOV_COUNTERS; ix++)
+ {
+ gcov_unsigned_t length;
+ gcov_type *values;
+
+ if (!gi_ptr->merge[ix])
+ continue;
+
+ if (gcov_read_unsigned () != GCOV_TAG_FOR_COUNTER (ix))
+ {
+ len = 0;
+ goto fail;
+ }
+
+ length = GCOV_TAG_COUNTER_NUM (gcov_read_unsigned ());
+ len = length * sizeof (gcov_type);
+ values = (gcov_type *) xmalloc (len);
+ if (!values)
+ goto fail;
+
+ fn_buffer->info.ctrs[n_ctrs].num = length;
+ fn_buffer->info.ctrs[n_ctrs].values = values;
+
+ while (length--)
+ *values++ = gcov_read_counter ();
+ n_ctrs++;
+ }
+
+ *end_ptr = fn_buffer;
+ return &fn_buffer->next;
+
+fail:
+ gcov_error ("profiling:%s:Function %u %s %u \n", filename, fn_ix,
+ len ? "cannot allocate" : "counter mismatch", len ? len : ix);
+
+ return (struct gcov_fn_buffer **)free_fn_data (gi_ptr, fn_buffer, ix);
+}
+
+/* Determine whether a counter is active. */
+
+static inline int
+gcov_counter_active (const struct gcov_info *info, unsigned int type)
+{
+ return (info->merge[type] != 0);
+}
+
+/* Add an unsigned value to the current crc */
+
+static gcov_unsigned_t
+crc32_unsigned (gcov_unsigned_t crc32, gcov_unsigned_t value)
+{
+ unsigned ix;
+
+ for (ix = 32; ix--; value <<= 1)
+ {
+ unsigned feedback;
+
+ feedback = (value ^ crc32) & 0x80000000 ? 0x04c11db7 : 0;
+ crc32 <<= 1;
+ crc32 ^= feedback;
+ }
+
+ return crc32;
+}
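
A standalone sketch of the bitwise CRC-32 update above (not part of the patch): polynomial 0x04c11db7, no bit reflection and no final XOR, folding in whole 32-bit words the way gcov_exit_compute_summary later folds in each object's stamp and function count; the two input words here are sample values:

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t
crc32_word (uint32_t crc32, uint32_t value)
{
  for (unsigned ix = 32; ix--; value <<= 1)
    {
      uint32_t feedback = (value ^ crc32) & 0x80000000u ? 0x04c11db7u : 0;
      crc32 = (crc32 << 1) ^ feedback;
    }
  return crc32;
}

int
main (void)
{
  uint32_t crc = 0;
  crc = crc32_word (crc, 0x12345678u);  /* e.g. an object's stamp */
  crc = crc32_word (crc, 42u);          /* e.g. its number of functions */
  printf ("checksum %#010x\n", (unsigned) crc);
  return 0;
}
```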
+
+/* Check if VERSION of the info block PTR matches the libgcov one.
+ Return 1 on success, or zero in case of a version mismatch.
+ If FILENAME is not NULL, its value is used for reporting purposes
+ instead of the value from the info block. */
+
+static int
+gcov_version (struct gcov_info *ptr, gcov_unsigned_t version,
+ const char *filename)
+{
+ if (version != GCOV_VERSION)
+ {
+ char v[4], e[4];
+
+ GCOV_UNSIGNED2STRING (v, version);
+ GCOV_UNSIGNED2STRING (e, GCOV_VERSION);
+
+ if (filename)
+ gcov_error ("profiling:%s:Version mismatch - expected %.4s got %.4s\n",
+ filename? filename : ptr->filename, e, v);
+ else
+ gcov_error ("profiling:Version mismatch - expected %.4s got %.4s\n", e, v);
+
+ return 0;
+ }
+ return 1;
+}
+
+/* Insert counter VALUE into HISTOGRAM. */
+
+static void
+gcov_histogram_insert(gcov_bucket_type *histogram, gcov_type value)
+{
+ unsigned i;
+
+ i = gcov_histo_index(value);
+ histogram[i].num_counters++;
+ histogram[i].cum_value += value;
+ if (value < histogram[i].min_value)
+ histogram[i].min_value = value;
+}
+
+/* Computes a histogram of the arc counters to place in the summary SUM. */
+
+static void
+gcov_compute_histogram (struct gcov_summary *sum)
+{
+ struct gcov_info *gi_ptr;
+ const struct gcov_fn_info *gfi_ptr;
+ const struct gcov_ctr_info *ci_ptr;
+ struct gcov_ctr_summary *cs_ptr;
+ unsigned t_ix, f_ix, ctr_info_ix, ix;
+ int h_ix;
+
+ /* This currently only applies to arc counters. */
+ t_ix = GCOV_COUNTER_ARCS;
+
+ /* First check if there are any counts recorded for this counter. */
+ cs_ptr = &(sum->ctrs[t_ix]);
+ if (!cs_ptr->num)
+ return;
+
+ for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
+ {
+ cs_ptr->histogram[h_ix].num_counters = 0;
+ cs_ptr->histogram[h_ix].min_value = cs_ptr->run_max;
+ cs_ptr->histogram[h_ix].cum_value = 0;
+ }
+
+ /* Walk through all the per-object structures and record each of
+ the count values in histogram. */
+ for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
+ {
+ if (!gi_ptr->merge[t_ix])
+ continue;
+
+ /* Find the appropriate index into the gcov_ctr_info array
+ for the counter we are currently working on based on the
+ existence of the merge function pointer for this object. */
+ for (ix = 0, ctr_info_ix = 0; ix < t_ix; ix++)
+ {
+ if (gi_ptr->merge[ix])
+ ctr_info_ix++;
+ }
+ for (f_ix = 0; f_ix != gi_ptr->n_functions; f_ix++)
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (!gfi_ptr || gfi_ptr->key != gi_ptr)
+ continue;
+
+ ci_ptr = &gfi_ptr->ctrs[ctr_info_ix];
+ for (ix = 0; ix < ci_ptr->num; ix++)
+ gcov_histogram_insert (cs_ptr->histogram, ci_ptr->values[ix]);
+ }
+ }
+}
+
+/* gcda filename. */
+static char *gi_filename;
+/* buffer for the fn_data from another program. */
+static struct gcov_fn_buffer *fn_buffer;
+/* buffer for summary from other programs to be written out. */
+static struct gcov_summary_buffer *sum_buffer;
+/* If the application calls fork or exec multiple times, we end up storing
+ the profile repeatedly. We should not account this as multiple runs, or
+ functions executed once may mistakenly become cold. */
+static int run_accounted = 0;
+
+/* This function computes the program-level summary and the histogram.
+ It computes and returns the CRC32 and stores the summary in THIS_PRG. */
+
+static gcov_unsigned_t
+gcov_exit_compute_summary (struct gcov_summary *this_prg)
+{
+ struct gcov_info *gi_ptr;
+ const struct gcov_fn_info *gfi_ptr;
+ struct gcov_ctr_summary *cs_ptr;
+ const struct gcov_ctr_info *ci_ptr;
+ int f_ix;
+ unsigned t_ix;
+ gcov_unsigned_t c_num;
+ gcov_unsigned_t crc32 = 0;
+
+ /* Find the totals for this execution. */
+ memset (this_prg, 0, sizeof (*this_prg));
+ for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
+ {
+ crc32 = crc32_unsigned (crc32, gi_ptr->stamp);
+ crc32 = crc32_unsigned (crc32, gi_ptr->n_functions);
+
+ for (f_ix = 0; (unsigned)f_ix != gi_ptr->n_functions; f_ix++)
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (gfi_ptr && gfi_ptr->key != gi_ptr)
+ gfi_ptr = 0;
+
+ crc32 = crc32_unsigned (crc32, gfi_ptr ? gfi_ptr->cfg_checksum : 0);
+ crc32 = crc32_unsigned (crc32,
+ gfi_ptr ? gfi_ptr->lineno_checksum : 0);
+ if (!gfi_ptr)
+ continue;
+
+ ci_ptr = gfi_ptr->ctrs;
+ for (t_ix = 0; t_ix != GCOV_COUNTERS_SUMMABLE; t_ix++)
+ {
+ if (!gi_ptr->merge[t_ix])
+ continue;
+
+ cs_ptr = &(this_prg->ctrs[t_ix]);
+ cs_ptr->num += ci_ptr->num;
+ crc32 = crc32_unsigned (crc32, ci_ptr->num);
+
+ for (c_num = 0; c_num < ci_ptr->num; c_num++)
+ {
+ cs_ptr->sum_all += ci_ptr->values[c_num];
+ if (cs_ptr->run_max < ci_ptr->values[c_num])
+ cs_ptr->run_max = ci_ptr->values[c_num];
+ }
+ ci_ptr++;
+ }
+ }
+ }
+ gcov_compute_histogram (this_prg);
+ return crc32;
+}
+
+/* A struct that bundles all the related information about the
+ gcda filename. */
+struct gcov_filename_aux {
+ char *gi_filename_up;
+ int gcov_prefix_strip;
+ size_t prefix_length;
+};
+
+/* Including system dependent components. */
+#include "libgcov-driver-system.c"
+
+/* Scan through the currently open gcda file corresponding to GI_PTR
+ to locate the end position of the last summary, returned in
+ SUMMARY_END_POS_P. Return 0 on success, -1 on error. */
+static int
+gcov_scan_summary_end (struct gcov_info *gi_ptr,
+ gcov_position_t *summary_end_pos_p)
+{
+ gcov_unsigned_t tag, version, stamp;
+ tag = gcov_read_unsigned ();
+ if (tag != GCOV_DATA_MAGIC)
+ {
+ gcov_error ("profiling:%s:Not a gcov data file\n", gi_filename);
+ return -1;
+ }
+
+ version = gcov_read_unsigned ();
+ if (!gcov_version (gi_ptr, version, gi_filename))
+ return -1;
+
+ stamp = gcov_read_unsigned ();
+ if (stamp != gi_ptr->stamp)
+ /* Read from a different compilation. Overwrite the file. */
+ return -1;
+
+ /* Look for program summary. */
+ while (1)
+ {
+ struct gcov_summary tmp;
+
+ *summary_end_pos_p = gcov_position ();
+ tag = gcov_read_unsigned ();
+ if (tag != GCOV_TAG_PROGRAM_SUMMARY)
+ break;
+
+ gcov_read_unsigned ();
+ gcov_read_summary (&tmp);
+ if (gcov_is_error ())
+ return -1;
+ }
+
+ return 0;
+}
+
+/* This function merges the counters in GI_PTR into an existing gcda file.
+ Return 0 on success.
+ Return -1 on error. In this case, the caller will goto read_fatal. */
+
+static int
+gcov_exit_merge_gcda (struct gcov_info *gi_ptr,
+ struct gcov_summary *prg_p,
+ struct gcov_summary *this_prg,
+ gcov_position_t *summary_pos_p,
+ gcov_position_t *eof_pos_p,
+ gcov_unsigned_t crc32)
+{
+ gcov_unsigned_t tag, length;
+ unsigned t_ix;
+ int f_ix;
+ int error = 0;
+ struct gcov_fn_buffer **fn_tail = &fn_buffer;
+ struct gcov_summary_buffer **sum_tail = &sum_buffer;
+
+ length = gcov_read_unsigned ();
+ if (!gcov_version (gi_ptr, length, gi_filename))
+ return -1;
+
+ length = gcov_read_unsigned ();
+ if (length != gi_ptr->stamp)
+ /* Read from a different compilation. Overwrite the file. */
+ return 0;
+
+ /* Look for program summary. */
+ for (f_ix = 0;;)
+ {
+ struct gcov_summary tmp;
+
+ *eof_pos_p = gcov_position ();
+ tag = gcov_read_unsigned ();
+ if (tag != GCOV_TAG_PROGRAM_SUMMARY)
+ break;
+
+ f_ix--;
+ length = gcov_read_unsigned ();
+ gcov_read_summary (&tmp);
+ if ((error = gcov_is_error ()))
+ goto read_error;
+ if (*summary_pos_p)
+ {
+ /* Save all summaries after the one that will be
+ merged into below. These will need to be rewritten
+ as histogram merging may change the number of non-zero
+ histogram entries that will be emitted, and thus the
+ size of the merged summary. */
+ (*sum_tail) = (struct gcov_summary_buffer *)
+ xmalloc (sizeof(struct gcov_summary_buffer));
+ (*sum_tail)->summary = tmp;
+ (*sum_tail)->next = 0;
+ sum_tail = &((*sum_tail)->next);
+ goto next_summary;
+ }
+ if (tmp.checksum != crc32)
+ goto next_summary;
+
+ for (t_ix = 0; t_ix != GCOV_COUNTERS_SUMMABLE; t_ix++)
+ if (tmp.ctrs[t_ix].num != this_prg->ctrs[t_ix].num)
+ goto next_summary;
+ *prg_p = tmp;
+ *summary_pos_p = *eof_pos_p;
+
+ next_summary:;
+ }
+
+ /* Merge execution counts for each function. */
+ for (f_ix = 0; (unsigned)f_ix != gi_ptr->n_functions;
+ f_ix++, tag = gcov_read_unsigned ())
+ {
+ const struct gcov_ctr_info *ci_ptr;
+ const struct gcov_fn_info *gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (tag != GCOV_TAG_FUNCTION)
+ goto read_mismatch;
+
+ length = gcov_read_unsigned ();
+ if (!length)
+ /* This function did not appear in the other program.
+ We have nothing to merge. */
+ continue;
+
+ if (length != GCOV_TAG_FUNCTION_LENGTH)
+ goto read_mismatch;
+
+ if (!gfi_ptr || gfi_ptr->key != gi_ptr)
+ {
+ /* This function appears in the other program. We
+ need to buffer the information in order to write
+ it back out -- we'll be inserting data before
+ this point, so cannot simply keep the data in the
+ file. */
+ fn_tail = buffer_fn_data (gi_filename,
+ gi_ptr, fn_tail, f_ix);
+ if (!fn_tail)
+ goto read_mismatch;
+ continue;
+ }
+
+ length = gcov_read_unsigned ();
+ if (length != gfi_ptr->ident)
+ goto read_mismatch;
+
+ length = gcov_read_unsigned ();
+ if (length != gfi_ptr->lineno_checksum)
+ goto read_mismatch;
+
+ length = gcov_read_unsigned ();
+ if (length != gfi_ptr->cfg_checksum)
+ goto read_mismatch;
+
+ ci_ptr = gfi_ptr->ctrs;
+ for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++)
+ {
+ gcov_merge_fn merge = gi_ptr->merge[t_ix];
+
+ if (!merge)
+ continue;
+
+ tag = gcov_read_unsigned ();
+ length = gcov_read_unsigned ();
+ if (tag != GCOV_TAG_FOR_COUNTER (t_ix)
+ || length != GCOV_TAG_COUNTER_LENGTH (ci_ptr->num))
+ goto read_mismatch;
+ (*merge) (ci_ptr->values, ci_ptr->num);
+ ci_ptr++;
+ }
+ if ((error = gcov_is_error ()))
+ goto read_error;
+ }
+
+ if (tag && tag != GCOV_TAG_MODULE_INFO)
+ {
+ read_mismatch:;
+ gcov_error ("profiling:%s:Merge mismatch for %s %u\n",
+ gi_filename, f_ix >= 0 ? "function" : "summary",
+ f_ix < 0 ? -1 - f_ix : f_ix);
+ return -1;
+ }
+ return 0;
+
+read_error:
+ gcov_error ("profiling:%s:%s merging\n", gi_filename,
+ error < 0 ? "Overflow": "Error");
+ return -1;
+}
+
+/* Write counters in GI_PTR to a gcda file starting from its current
+ location. */
+
+static void
+gcov_write_func_counters (struct gcov_info *gi_ptr)
+{
+ unsigned f_ix;
+
+ /* Write execution counts for each function. */
+ for (f_ix = 0; f_ix != gi_ptr->n_functions; f_ix++)
+ {
+ unsigned buffered = 0;
+ const struct gcov_fn_info *gfi_ptr;
+ const struct gcov_ctr_info *ci_ptr;
+ gcov_unsigned_t length;
+ unsigned t_ix;
+
+ if (fn_buffer && fn_buffer->fn_ix == f_ix)
+ {
+ /* Buffered data from another program. */
+ buffered = 1;
+ gfi_ptr = &fn_buffer->info;
+ length = GCOV_TAG_FUNCTION_LENGTH;
+ }
+ else
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+ if (gfi_ptr && gfi_ptr->key == gi_ptr)
+ length = GCOV_TAG_FUNCTION_LENGTH;
+ else
+ length = 0;
+ }
+
+ gcov_write_tag_length (GCOV_TAG_FUNCTION, length);
+ if (!length)
+ continue;
+
+ gcov_write_unsigned (gfi_ptr->ident);
+ gcov_write_unsigned (gfi_ptr->lineno_checksum);
+ gcov_write_unsigned (gfi_ptr->cfg_checksum);
+
+ ci_ptr = gfi_ptr->ctrs;
+ for (t_ix = 0; t_ix < GCOV_COUNTERS; t_ix++)
+ {
+ gcov_unsigned_t n_counts;
+ gcov_type *c_ptr;
+
+ if (!gi_ptr->merge[t_ix])
+ continue;
+
+ n_counts = ci_ptr->num;
+ gcov_write_tag_length (GCOV_TAG_FOR_COUNTER (t_ix),
+ GCOV_TAG_COUNTER_LENGTH (n_counts));
+ c_ptr = ci_ptr->values;
+ while (n_counts--)
+ gcov_write_counter (*c_ptr++);
+ ci_ptr++;
+ }
+ if (buffered)
+ fn_buffer = free_fn_data (gi_ptr, fn_buffer, GCOV_COUNTERS);
+ }
+
+ gi_ptr->eof_pos = gcov_position ();
+ gcov_write_unsigned (0);
+}
+
+/* Write counters in GI_PTR and the summary in PRG to a gcda file. In
+ the case of appending to an existing file, SUMMARY_POS will be non-zero.
+ We will write the file starting from SUMMARY_POS. */
+
+static void
+gcov_exit_write_gcda (struct gcov_info *gi_ptr,
+ const struct gcov_summary *prg_p,
+ const gcov_position_t eof_pos,
+ const gcov_position_t summary_pos)
+
+{
+ struct gcov_summary_buffer *next_sum_buffer;
+
+ /* Write out the data. */
+ if (!eof_pos)
+ {
+ gcov_write_tag_length (GCOV_DATA_MAGIC, GCOV_VERSION);
+ gcov_write_unsigned (gi_ptr->stamp);
+ }
+
+ if (summary_pos)
+ gcov_seek (summary_pos);
+ gcc_assert (!summary_pos || summary_pos == gcov_position ());
+
+ /* Generate whole program statistics. */
+ gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, prg_p);
+
+ /* Rewrite all the summaries that were after the summary we merged
+ into. This is necessary as the merged summary may have a different
+ size due to the number of non-zero histogram entries changing after
+ merging. */
+
+ while (sum_buffer)
+ {
+ gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &sum_buffer->summary);
+ next_sum_buffer = sum_buffer->next;
+ free (sum_buffer);
+ sum_buffer = next_sum_buffer;
+ }
+
+ /* Write the counters. */
+ gcov_write_func_counters (gi_ptr);
+}
+
+/* Helper function for merging summary.
+ Return -1 on error. Return 0 on success. */
+
+static int
+gcov_exit_merge_summary (const struct gcov_info *gi_ptr, struct gcov_summary *prg,
+ struct gcov_summary *this_prg, gcov_unsigned_t crc32,
+ struct gcov_summary *all_prg __attribute__ ((unused)))
+{
+ struct gcov_ctr_summary *cs_prg, *cs_tprg;
+ unsigned t_ix;
+#if !GCOV_LOCKED
+ /* summary for all instances of program. */
+ struct gcov_ctr_summary *cs_all;
+#endif
+
+ /* Merge the summaries. */
+ for (t_ix = 0; t_ix < GCOV_COUNTERS_SUMMABLE; t_ix++)
+ {
+ cs_prg = &(prg->ctrs[t_ix]);
+ cs_tprg = &(this_prg->ctrs[t_ix]);
+
+ if (gi_ptr->merge[t_ix])
+ {
+ int first = !cs_prg->runs;
+
+ if (!run_accounted)
+ cs_prg->runs++;
+ if (first)
+ cs_prg->num = cs_tprg->num;
+ cs_prg->sum_all += cs_tprg->sum_all;
+ if (cs_prg->run_max < cs_tprg->run_max)
+ cs_prg->run_max = cs_tprg->run_max;
+ cs_prg->sum_max += cs_tprg->run_max;
+ if (first)
+ memcpy (cs_prg->histogram, cs_tprg->histogram,
+ sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
+ else
+ gcov_histogram_merge (cs_prg->histogram, cs_tprg->histogram);
+ }
+ else if (cs_prg->runs)
+ {
+ gcov_error ("profiling:%s:Merge mismatch for summary.\n",
+ gi_filename);
+ return -1;
+ }
+#if !GCOV_LOCKED
+ cs_all = &all_prg->ctrs[t_ix];
+ if (!cs_all->runs && cs_prg->runs)
+ {
+ cs_all->num = cs_prg->num;
+ cs_all->runs = cs_prg->runs;
+ cs_all->sum_all = cs_prg->sum_all;
+ cs_all->run_max = cs_prg->run_max;
+ cs_all->sum_max = cs_prg->sum_max;
+ }
+ else if (!all_prg->checksum
+ /* Don't compare the histograms, which may have slight
+ variations depending on the order they were updated
+ due to the truncating integer divides used in the
+ merge. */
+ && (cs_all->num != cs_prg->num
+ || cs_all->runs != cs_prg->runs
+ || cs_all->sum_all != cs_prg->sum_all
+ || cs_all->run_max != cs_prg->run_max
+ || cs_all->sum_max != cs_prg->sum_max))
+ {
+ gcov_error ("profiling:%s:Data file mismatch - some "
+ "data files may have been concurrently "
+ "updated without locking support\n", gi_filename);
+ all_prg->checksum = ~0u;
+ }
+#endif
+ }
+
+ prg->checksum = crc32;
+
+ return 0;
+}
+
+/* Sort N entries in VALUE_ARRAY in descending order.
+ Each entry in VALUE_ARRAY has two values. The sorting
+ is based on the second value. */
+
+GCOV_LINKAGE void
+gcov_sort_n_vals (gcov_type *value_array, int n)
+{
+ int j, k;
+ for (j = 2; j < n; j += 2)
+ {
+ gcov_type cur_ent[2];
+ cur_ent[0] = value_array[j];
+ cur_ent[1] = value_array[j + 1];
+ k = j - 2;
+ while (k >= 0 && value_array[k + 1] < cur_ent[1])
+ {
+ value_array[k + 2] = value_array[k];
+ value_array[k + 3] = value_array[k+1];
+ k -= 2;
+ }
+ value_array[k + 2] = cur_ent[0];
+ value_array[k + 3] = cur_ent[1];
+ }
+}
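
A usage illustration (not part of the patch; sort_n_vals_demo is a local copy under a hypothetical name): the routine sorts an array of (id, count) pairs in place, descending by the second element of each pair, which is how the top-N indirect-call targets end up ordered in the function below:

```c
#include <stdio.h>

typedef long long gcov_type;

/* Same insertion sort as gcov_sort_n_vals: pairs, keyed on the second
   element, largest key first.  */
static void
sort_n_vals_demo (gcov_type *value_array, int n)
{
  int j, k;
  for (j = 2; j < n; j += 2)
    {
      gcov_type cur_ent[2] = { value_array[j], value_array[j + 1] };
      k = j - 2;
      while (k >= 0 && value_array[k + 1] < cur_ent[1])
        {
          value_array[k + 2] = value_array[k];
          value_array[k + 3] = value_array[k + 1];
          k -= 2;
        }
      value_array[k + 2] = cur_ent[0];
      value_array[k + 3] = cur_ent[1];
    }
}

int
main (void)
{
  /* (callee id, call count) pairs.  */
  gcov_type vals[] = { 101, 5, 102, 40, 103, 12 };
  sort_n_vals_demo (vals, 6);
  for (int i = 0; i < 6; i += 2)
    printf ("id %lld count %lld\n", vals[i], vals[i + 1]);
  /* Prints: id 102 count 40, id 103 count 12, id 101 count 5.  */
  return 0;
}
```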
+
+/* Sort the profile counters for all indirect call sites. Counters
+ for each call site are allocated in array COUNTERS. */
+
+static void
+gcov_sort_icall_topn_counter (const struct gcov_ctr_info *counters)
+{
+ int i;
+ gcov_type *values;
+ int n = counters->num;
+ gcc_assert (!(n % GCOV_ICALL_TOPN_NCOUNTS));
+
+ values = counters->values;
+
+ for (i = 0; i < n; i += GCOV_ICALL_TOPN_NCOUNTS)
+ {
+ gcov_type *value_array = &values[i + 1];
+ gcov_sort_n_vals (value_array, GCOV_ICALL_TOPN_NCOUNTS - 1);
+ }
+}
+
+static void
+gcov_sort_topn_counter_arrays (const struct gcov_info *gi_ptr)
+{
+ unsigned int i;
+ int f_ix;
+ const struct gcov_fn_info *gfi_ptr;
+ const struct gcov_ctr_info *ci_ptr;
+
+ for (f_ix = 0; (unsigned)f_ix != gi_ptr->n_functions; f_ix++)
+ {
+ gfi_ptr = gi_ptr->functions[f_ix];
+ ci_ptr = gfi_ptr->ctrs;
+ for (i = 0; i < GCOV_COUNTERS; i++)
+ {
+ if (!gcov_counter_active (gi_ptr, i))
+ continue;
+ if (i == GCOV_COUNTER_ICALL_TOPNV)
+ {
+ gcov_sort_icall_topn_counter (ci_ptr);
+ break;
+ }
+ ci_ptr++;
+ }
+ }
+}
+
+/* Dump the coverage counts for one gcov_info object. We merge with existing
+ counts when possible, to avoid growing the .da files ad infinitum. We use
+ this program's checksum to make sure we only accumulate whole program
+ statistics to the correct summary. An object file might be embedded
+ in two separate programs, and we must keep the two program
+ summaries separate. */
+
+static void
+gcov_exit_dump_gcov (struct gcov_info *gi_ptr, struct gcov_filename_aux *gf,
+ gcov_unsigned_t crc32, struct gcov_summary *all_prg,
+ struct gcov_summary *this_prg)
+{
+ struct gcov_summary prg; /* summary for this object over all program. */
+ int error;
+ gcov_unsigned_t tag;
+ gcov_position_t summary_pos = 0;
+ gcov_position_t eof_pos = 0;
+
+ fn_buffer = 0;
+ sum_buffer = 0;
+
+ gcov_sort_topn_counter_arrays (gi_ptr);
+
+ error = gcov_exit_open_gcda_file (gi_ptr, gf);
+ if (error == -1)
+ return;
+
+ tag = gcov_read_unsigned ();
+ if (tag)
+ {
+ /* Merge data from file. */
+ if (tag != GCOV_DATA_MAGIC)
+ {
+ gcov_error ("profiling:%s:Not a gcov data file\n", gi_filename);
+ goto read_fatal;
+ }
+ error = gcov_exit_merge_gcda (gi_ptr, &prg, this_prg, &summary_pos, &eof_pos,
+ crc32);
+ if (error == -1)
+ goto read_fatal;
+ }
+
+ gcov_rewrite ();
+
+ if (!summary_pos)
+ {
+ memset (&prg, 0, sizeof (prg));
+ summary_pos = eof_pos;
+ }
+
+ error = gcov_exit_merge_summary (gi_ptr, &prg, this_prg, crc32, all_prg);
+ if (error == -1)
+ goto read_fatal;
+
+ gcov_exit_write_gcda (gi_ptr, &prg, eof_pos, summary_pos);
+ /* fall through */
+
+read_fatal:;
+ while (fn_buffer)
+ fn_buffer = free_fn_data (gi_ptr, fn_buffer, GCOV_COUNTERS);
+
+ if ((error = gcov_close ()))
+ gcov_error (error < 0 ?
+ "profiling:%s:Overflow writing\n" :
+ "profiling:%s:Error writing\n",
+ gi_filename);
+}
+
+/* Write imported files (auxiliary modules) for primary module GI_PTR
+ into file GI_FILENAME. */
+
+static void
+gcov_write_import_file (char *gi_filename, struct gcov_info *gi_ptr)
+{
+ char *gi_imports_filename;
+ const char *gcov_suffix;
+ FILE *imports_file;
+ size_t prefix_length, suffix_length;
+
+ gcov_suffix = getenv ("GCOV_IMPORTS_SUFFIX");
+ if (!gcov_suffix || !strlen (gcov_suffix))
+ gcov_suffix = ".imports";
+ suffix_length = strlen (gcov_suffix);
+ prefix_length = strlen (gi_filename);
+ gi_imports_filename = (char *) alloca (prefix_length + suffix_length + 1);
+ memset (gi_imports_filename, 0, prefix_length + suffix_length + 1);
+ memcpy (gi_imports_filename, gi_filename, prefix_length);
+ memcpy (gi_imports_filename + prefix_length, gcov_suffix, suffix_length);
+ imports_file = fopen (gi_imports_filename, "w");
+ if (imports_file)
+ {
+ const struct dyn_imp_mod **imp_mods;
+ unsigned i, imp_len;
+ imp_mods = gcov_get_sorted_import_module_array (gi_ptr, &imp_len);
+ if (imp_mods)
+ {
+ for (i = 0; i < imp_len; i++)
+ {
+ fprintf (imports_file, "%s\n",
+ imp_mods[i]->imp_mod->mod_info->source_filename);
+ fprintf (imports_file, "%s%s\n",
+ imp_mods[i]->imp_mod->mod_info->da_filename, GCOV_DATA_SUFFIX);
+ }
+ free (imp_mods);
+ }
+ fclose (imports_file);
+ }
+}
+
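/* Illustrative sketch, not part of the patch: the file-naming rule that
   gcov_write_import_file above implements.  'imports_name' is a
   hypothetical helper; the suffix defaults to ".imports" unless
   GCOV_IMPORTS_SUFFIX is set in the environment.  */
#include <stdio.h>
#include <stdlib.h>

static void
imports_name (char *out, size_t out_len, const char *da_filename)
{
  const char *suffix = getenv ("GCOV_IMPORTS_SUFFIX");
  if (!suffix || !*suffix)
    suffix = ".imports";
  snprintf (out, out_len, "%s%s", da_filename, suffix);
}

int
main (void)
{
  char buf[256];
  imports_name (buf, sizeof buf, "foo.gcda");
  puts (buf);   /* prints "foo.gcda.imports" when the variable is unset */
  return 0;
}
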
+static void
+gcov_dump_module_info (struct gcov_filename_aux *gf)
+{
+ struct gcov_info *gi_ptr;
+
+ /* Compute the module groups and record whether there were any
+ counter fixups applied that require rewriting the counters. */
+ int changed = __gcov_compute_module_groups ();
+
+ /* Now write out module group info. */
+ for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
+ {
+ int error;
+
+ if (gcov_exit_open_gcda_file (gi_ptr, gf) == -1)
+ continue;
+
+ if (changed)
+ {
+ /* Scan file to find the end of the summary section, which is
+ where we will start re-writing the counters. */
+ gcov_position_t summary_end_pos;
+ if (gcov_scan_summary_end (gi_ptr, &summary_end_pos) == -1)
+ gcov_error ("profiling:%s:Error scanning summaries\n",
+ gi_filename);
+ else
+ {
+ gcov_position_t eof_pos = gi_ptr->eof_pos;
+ gcov_rewrite ();
+ gcov_seek (summary_end_pos);
+ gcov_write_func_counters (gi_ptr);
+ gcc_assert (eof_pos == gi_ptr->eof_pos);
+ }
+ }
+ else
+ gcov_rewrite ();
+
+ /* Overwrite the zero word at the end of the file. */
+ gcov_seek (gi_ptr->eof_pos);
+
+ gcov_write_module_infos (gi_ptr);
+ /* Write the end marker */
+ gcov_write_unsigned (0);
+ gcov_truncate ();
+
+ if ((error = gcov_close ()))
+ gcov_error (error < 0 ? "profiling:%s:Overflow writing\n" :
+ "profiling:%s:Error writing\n",
+ gi_filename);
+ gcov_write_import_file (gi_filename, gi_ptr);
+ }
+ __gcov_finalize_dyn_callgraph ();
+}
+
+/* Dump all the coverage counts for the program. It first computes the
+ program summary and then traverses the gcov_list and dumps the gcov_info
+ objects one by one. */
+
+void
+gcov_exit (void)
+{
+ struct gcov_info *gi_ptr;
+ struct gcov_filename_aux gf;
+ gcov_unsigned_t crc32;
+ int dump_module_info = 0;
+ struct gcov_summary all_prg;
+ struct gcov_summary this_prg;
+
+ /* Prevent the counters from being dumped a second time on exit when the
+ application already wrote out the profile using __gcov_dump(). */
+ if (gcov_dump_complete)
+ return;
+
+ crc32 = gcov_exit_compute_summary (&this_prg);
+
+ allocate_filename_struct (&gf);
+#if !GCOV_LOCKED
+ memset (&all_prg, 0, sizeof (all_prg));
+#endif
+
+ /* Now merge each file. */
+ for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
+ {
+ gcov_exit_dump_gcov (gi_ptr, &gf, crc32, &all_prg, &this_prg);
+
+ /* The IS_PRIMARY field is overloaded to indicate if this module
+ is FDO/LIPO. */
+ dump_module_info |= gi_ptr->mod_info->is_primary;
+ }
+ run_accounted = 1;
+
+ if (dump_module_info)
+ gcov_dump_module_info (&gf);
+
+ if (gi_filename)
+ free (gi_filename);
+}
+
+/* Reset all counters to zero. */
+
+void
+gcov_clear (void)
+{
+ const struct gcov_info *gi_ptr;
+
+ for (gi_ptr = __gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
+ {
+ unsigned f_ix;
+
+ for (f_ix = 0; f_ix < gi_ptr->n_functions; f_ix++)
+ {
+ unsigned t_ix;
+ const struct gcov_fn_info *gfi_ptr = gi_ptr->functions[f_ix];
+
+ if (!gfi_ptr || gfi_ptr->key != gi_ptr)
+ continue;
+ const struct gcov_ctr_info *ci_ptr = gfi_ptr->ctrs;
+ for (t_ix = 0; t_ix != GCOV_COUNTERS; t_ix++)
+ {
+ if (!gi_ptr->merge[t_ix])
+ continue;
+
+ memset (ci_ptr->values, 0, sizeof (gcov_type) * ci_ptr->num);
+ ci_ptr++;
+ }
+ }
+ }
+}
+
+/* Add a new object file onto the bb chain. Invoked automatically
+ when running an object file's global ctors. */
+
+void
+__gcov_init (struct gcov_info *info)
+{
+#ifndef IN_GCOV_TOOL
+ if (!gcov_sampling_period_initialized)
+ {
+ const char* env_value_str = getenv ("GCOV_SAMPLING_PERIOD");
+ if (env_value_str)
+ {
+ int env_value_int = atoi(env_value_str);
+ if (env_value_int >= 1)
+ __gcov_sampling_period = env_value_int;
+ }
+ gcov_sampling_period_initialized = 1;
+ }
+#endif
+
+ if (!info->version || !info->n_functions)
+ return;
+ if (gcov_version (info, info->version, 0))
+ {
+ size_t filename_length = strlen(info->filename);
+
+ /* Refresh the longest file name information */
+ if (filename_length > gcov_max_filename)
+ gcov_max_filename = filename_length;
+
+ /* Assign the module ID (starting at 1). */
+ info->mod_info->ident = (++gcov_cur_module_id);
+ gcc_assert (EXTRACT_MODULE_ID_FROM_GLOBAL_ID (GEN_FUNC_GLOBAL_ID (
+ info->mod_info->ident, 0))
+ == info->mod_info->ident);
+
+ if (!__gcov_list)
+ atexit (gcov_exit);
+
+ info->next = __gcov_list;
+ __gcov_list = info;
+ }
+ info->version = 0;
+}
+
+#endif /* L_gcov */
+#endif /* inhibit_libc */
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/README b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/README
new file mode 100644
index 0000000..7086a77
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/README
@@ -0,0 +1,14 @@
+This README file is copied into the directory for GCC-only header files
+when fixincludes is run by the makefile for GCC.
+
+Many of the files in this directory were automatically edited from the
+standard system header files by the fixincludes process. They are
+system-specific, and will not work on any other kind of system. They
+are also not part of GCC. We have to do this because GCC requires
+ANSI C headers and many vendors supply ANSI-incompatible headers.
+
+Because this is an automated process, sometimes headers get "fixed"
+that do not, strictly speaking, need a fix. As long as nothing is broken
+by the process, it is just an unfortunate collateral inconvenience.
+We would like to rectify it, if it is not "too inconvenient".
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/limits.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/limits.h
new file mode 100644
index 0000000..8c6a4d3
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/limits.h
@@ -0,0 +1,171 @@
+/* Copyright (C) 1992-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This administrivia gets added to the beginning of limits.h
+ if the system has its own version of limits.h. */
+
+/* We use _GCC_LIMITS_H_ because we want this not to match
+ any macros that the system's limits.h uses for its own purposes. */
+#ifndef _GCC_LIMITS_H_ /* Terminated in limity.h. */
+#define _GCC_LIMITS_H_
+
+#ifndef _LIBC_LIMITS_H_
+/* Use "..." so that we find syslimits.h only in this same directory. */
+#include "syslimits.h"
+#endif
+/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef _LIMITS_H___
+#define _LIMITS_H___
+
+/* Number of bits in a `char'. */
+#undef CHAR_BIT
+#define CHAR_BIT __CHAR_BIT__
+
+/* Maximum length of a multibyte character. */
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+/* Minimum and maximum values a `signed char' can hold. */
+#undef SCHAR_MIN
+#define SCHAR_MIN (-SCHAR_MAX - 1)
+#undef SCHAR_MAX
+#define SCHAR_MAX __SCHAR_MAX__
+
+/* Maximum value an `unsigned char' can hold. (Minimum is 0). */
+#undef UCHAR_MAX
+#if __SCHAR_MAX__ == __INT_MAX__
+# define UCHAR_MAX (SCHAR_MAX * 2U + 1U)
+#else
+# define UCHAR_MAX (SCHAR_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+# undef CHAR_MIN
+# if __SCHAR_MAX__ == __INT_MAX__
+# define CHAR_MIN 0U
+# else
+# define CHAR_MIN 0
+# endif
+# undef CHAR_MAX
+# define CHAR_MAX UCHAR_MAX
+#else
+# undef CHAR_MIN
+# define CHAR_MIN SCHAR_MIN
+# undef CHAR_MAX
+# define CHAR_MAX SCHAR_MAX
+#endif
+
+/* Minimum and maximum values a `signed short int' can hold. */
+#undef SHRT_MIN
+#define SHRT_MIN (-SHRT_MAX - 1)
+#undef SHRT_MAX
+#define SHRT_MAX __SHRT_MAX__
+
+/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */
+#undef USHRT_MAX
+#if __SHRT_MAX__ == __INT_MAX__
+# define USHRT_MAX (SHRT_MAX * 2U + 1U)
+#else
+# define USHRT_MAX (SHRT_MAX * 2 + 1)
+#endif
+
+/* Minimum and maximum values a `signed int' can hold. */
+#undef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#undef INT_MAX
+#define INT_MAX __INT_MAX__
+
+/* Maximum value an `unsigned int' can hold. (Minimum is 0). */
+#undef UINT_MAX
+#define UINT_MAX (INT_MAX * 2U + 1U)
+
+/* Minimum and maximum values a `signed long int' can hold.
+ (Same as `int'). */
+#undef LONG_MIN
+#define LONG_MIN (-LONG_MAX - 1L)
+#undef LONG_MAX
+#define LONG_MAX __LONG_MAX__
+
+/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */
+#undef ULONG_MAX
+#define ULONG_MAX (LONG_MAX * 2UL + 1UL)
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LLONG_MIN
+# define LLONG_MIN (-LLONG_MAX - 1LL)
+# undef LLONG_MAX
+# define LLONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULLONG_MAX
+# define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL)
+#endif
+
+#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__)
+/* Minimum and maximum values a `signed long long int' can hold. */
+# undef LONG_LONG_MIN
+# define LONG_LONG_MIN (-LONG_LONG_MAX - 1LL)
+# undef LONG_LONG_MAX
+# define LONG_LONG_MAX __LONG_LONG_MAX__
+
+/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */
+# undef ULONG_LONG_MAX
+# define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1ULL)
+#endif
+
+#endif /* _LIMITS_H___ */
+/* This administrivia gets added to the end of limits.h
+ if the system has its own version of limits.h. */
+
+#else /* not _GCC_LIMITS_H_ */
+
+#ifdef _GCC_NEXT_LIMITS_H
+#include_next <limits.h> /* recurse down to the real one */
+#endif
+
+#endif /* not _GCC_LIMITS_H_ */
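
/* Illustrative sketch, not part of the header: compile-time checks of the
   "unsigned max == 2 * signed max + 1" identities that the definitions
   above rely on (uses the C11 _Static_assert keyword, which this GCC
   accepts).  */
#include <limits.h>

_Static_assert (UCHAR_MAX == 2U * SCHAR_MAX + 1U, "UCHAR_MAX identity");
_Static_assert (USHRT_MAX == 2U * SHRT_MAX + 1U, "USHRT_MAX identity");
_Static_assert (UINT_MAX == 2U * INT_MAX + 1U, "UINT_MAX identity");
_Static_assert (ULONG_MAX == 2UL * LONG_MAX + 1UL, "ULONG_MAX identity");

int
main (void)
{
  return 0;
}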
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/linux/a.out.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/linux/a.out.h
new file mode 100644
index 0000000..0c0972b
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/linux/a.out.h
@@ -0,0 +1,229 @@
+/* DO NOT EDIT THIS FILE.
+
+ It has been auto-edited by fixincludes from:
+
+ "/tmp/ndk-xur/build/toolchain/prefix/sysroot/usr/include/linux/a.out.h"
+
+ This had to be done to correct non-standard usages in the
+ original, manufacturer supplied header file. */
+
+/****************************************************************************
+ ****************************************************************************
+ ***
+ *** This header was automatically generated from a Linux kernel header
+ *** of the same name, to make information necessary for userspace to
+ *** call into the kernel available to libc. It contains only constants,
+ *** structures, and macros generated from the original header, and thus,
+ *** contains no copyrightable information.
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef __A_OUT_GNU_H__
+#define __A_OUT_GNU_H__
+
+#define __GNU_EXEC_MACROS__
+
+#ifndef __STRUCT_EXEC_OVERRIDE__
+
+#include <asm/a.out.h>
+
+#endif
+
+enum machine_type {
+#ifdef M_OLDSUN2
+ M__OLDSUN2 = M_OLDSUN2,
+#else
+ M_OLDSUN2 = 0,
+#endif
+#ifdef M_68010
+ M__68010 = M_68010,
+#else
+ M_68010 = 1,
+#endif
+#ifdef M_68020
+ M__68020 = M_68020,
+#else
+ M_68020 = 2,
+#endif
+#ifdef M_SPARC
+ M__SPARC = M_SPARC,
+#else
+ M_SPARC = 3,
+#endif
+
+ M_386 = 100,
+ M_MIPS1 = 151,
+ M_MIPS2 = 152
+};
+
+#ifndef N_MAGIC
+#define N_MAGIC(exec) ((exec).a_info & 0xffff)
+#endif
+#define N_MACHTYPE(exec) ((enum machine_type)(((exec).a_info >> 16) & 0xff))
+#define N_FLAGS(exec) (((exec).a_info >> 24) & 0xff)
+#define N_SET_INFO(exec, magic, type, flags) ((exec).a_info = ((magic) & 0xffff) | (((int)(type) & 0xff) << 16) | (((flags) & 0xff) << 24))
+#define N_SET_MAGIC(exec, magic) ((exec).a_info = (((exec).a_info & 0xffff0000) | ((magic) & 0xffff)))
+
+#define N_SET_MACHTYPE(exec, machtype) ((exec).a_info = ((exec).a_info&0xff00ffff) | ((((int)(machtype))&0xff) << 16))
+
+#define N_SET_FLAGS(exec, flags) ((exec).a_info = ((exec).a_info&0x00ffffff) | (((flags) & 0xff) << 24))
+
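/* Illustrative sketch, not part of the header: the bit layout the
   N_SET_INFO/N_MAGIC/N_MACHTYPE/N_FLAGS accessors above encode into
   a_info -- magic in bits 0-15, machine type in bits 16-23, flags in
   bits 24-31.  'exec_demo' is a hypothetical stand-in for struct exec.  */
#include <assert.h>

struct exec_demo { unsigned int a_info; };

int
main (void)
{
  struct exec_demo e = { 0 };
  e.a_info = (0413u & 0xffff)          /* ZMAGIC       */
             | ((100u & 0xff) << 16)   /* M_386        */
             | ((1u & 0xff) << 24);    /* one flag bit */
  assert ((e.a_info & 0xffff) == 0413);        /* N_MAGIC    */
  assert (((e.a_info >> 16) & 0xff) == 100);   /* N_MACHTYPE */
  assert (((e.a_info >> 24) & 0xff) == 1);     /* N_FLAGS    */
  return 0;
}
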
+#define OMAGIC 0407
+
+#define NMAGIC 0410
+
+#define ZMAGIC 0413
+
+#define QMAGIC 0314
+
+#define CMAGIC 0421
+
+#ifndef N_BADMAG
+#define N_BADMAG(x) (N_MAGIC(x) != OMAGIC && N_MAGIC(x) != NMAGIC && N_MAGIC(x) != ZMAGIC && N_MAGIC(x) != QMAGIC)
+#endif
+
+#define _N_HDROFF(x) (1024 - sizeof (struct exec))
+
+#ifndef N_TXTOFF
+#define N_TXTOFF(x) (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : (N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec)))
+#endif
+
+#ifndef N_DATOFF
+#define N_DATOFF(x) (N_TXTOFF(x) + (x).a_text)
+#endif
+
+#ifndef N_TRELOFF
+#define N_TRELOFF(x) (N_DATOFF(x) + (x).a_data)
+#endif
+
+#ifndef N_DRELOFF
+#define N_DRELOFF(x) (N_TRELOFF(x) + N_TRSIZE(x))
+#endif
+
+#ifndef N_SYMOFF
+#define N_SYMOFF(x) (N_DRELOFF(x) + N_DRSIZE(x))
+#endif
+
+#ifndef N_STROFF
+#define N_STROFF(x) (N_SYMOFF(x) + N_SYMSIZE(x))
+#endif
+
+#ifndef N_TXTADDR
+#define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0)
+#endif
+
+#if defined(vax) || defined(hp300) || defined(pyr)
+#define SEGMENT_SIZE page_size
+#endif
+#ifdef sony
+#define SEGMENT_SIZE 0x2000
+#endif
+#ifdef is68k
+#define SEGMENT_SIZE 0x20000
+#endif
+#if defined(m68k) && defined(PORTAR)
+#define PAGE_SIZE 0x400
+#define SEGMENT_SIZE PAGE_SIZE
+#endif
+
+#ifdef __linux__
+#include <asm/page.h>
+#if defined(__i386__) || defined(__mc68000__)
+#define SEGMENT_SIZE 1024
+#else
+#ifndef SEGMENT_SIZE
+#define SEGMENT_SIZE PAGE_SIZE
+#endif
+#endif
+#endif
+
+#define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE)
+
+#define _N_TXTENDADDR(x) (N_TXTADDR(x)+(x).a_text)
+
+#ifndef N_DATADDR
+#define N_DATADDR(x) (N_MAGIC(x)==OMAGIC? (_N_TXTENDADDR(x)) : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x))))
+#endif
+
+#ifndef N_BSSADDR
+#define N_BSSADDR(x) (N_DATADDR(x) + (x).a_data)
+#endif
+
+#ifndef N_NLIST_DECLARED
+struct nlist {
+ union {
+ char *n_name;
+ struct nlist *n_next;
+ long n_strx;
+ } n_un;
+ unsigned char n_type;
+ char n_other;
+ short n_desc;
+ unsigned long n_value;
+};
+#endif
+
+#ifndef N_UNDF
+#define N_UNDF 0
+#endif
+#ifndef N_ABS
+#define N_ABS 2
+#endif
+#ifndef N_TEXT
+#define N_TEXT 4
+#endif
+#ifndef N_DATA
+#define N_DATA 6
+#endif
+#ifndef N_BSS
+#define N_BSS 8
+#endif
+#ifndef N_FN
+#define N_FN 15
+#endif
+
+#ifndef N_EXT
+#define N_EXT 1
+#endif
+#ifndef N_TYPE
+#define N_TYPE 036
+#endif
+#ifndef N_STAB
+#define N_STAB 0340
+#endif
+
+#define N_INDR 0xa
+
+#define N_SETA 0x14
+#define N_SETT 0x16
+#define N_SETD 0x18
+#define N_SETB 0x1A
+
+#define N_SETV 0x1C
+
+#ifndef N_RELOCATION_INFO_DECLARED
+
+struct relocation_info
+{
+
+ int r_address;
+
+ unsigned int r_symbolnum:24;
+
+ unsigned int r_pcrel:1;
+
+ unsigned int r_length:2;
+
+ unsigned int r_extern:1;
+
+#ifdef NS32K
+ unsigned r_bsr:1;
+ unsigned r_disp:1;
+ unsigned r_pad:2;
+#else
+ unsigned int r_pad:4;
+#endif
+};
+#endif
+
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/stdio.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/stdio.h
new file mode 100644
index 0000000..c73f7b2
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/stdio.h
@@ -0,0 +1,441 @@
+/* DO NOT EDIT THIS FILE.
+
+ It has been auto-edited by fixincludes from:
+
+ "/tmp/ndk-xur/build/toolchain/prefix/sysroot/usr/include/stdio.h"
+
+ This had to be done to correct non-standard usages in the
+ original, manufacturer supplied header file. */
+
+/* $OpenBSD: stdio.h,v 1.35 2006/01/13 18:10:09 miod Exp $ */
+/* $NetBSD: stdio.h,v 1.18 1996/04/25 18:29:21 jtc Exp $ */
+
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)stdio.h 5.17 (Berkeley) 6/3/91
+ */
+
+#ifndef _STDIO_H_
+#define _STDIO_H_
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+/* __gnuc_va_list and size_t must be defined by stdio.h according to Posix */
+#define __need___va_list
+#include <stdarg.h>
+
+/* note that this forces stddef.h to *only* define size_t */
+#define __need_size_t
+#include <stddef.h>
+
+#define __need_NULL
+#include <stddef.h>
+
+#define _FSTDIO /* Define for new stdio with functions. */
+
+typedef off_t fpos_t; /* stdio file position type */
+
+/*
+ * NB: to fit things in six character monocase externals, the stdio
+ * code uses the prefix `__s' for stdio objects, typically followed
+ * by a three-character attempt at a mnemonic.
+ */
+
+/* stdio buffers */
+struct __sbuf {
+ unsigned char *_base;
+ int _size;
+};
+
+/*
+ * stdio state variables.
+ *
+ * The following always hold:
+ *
+ * if (_flags&(__SLBF|__SWR)) == (__SLBF|__SWR),
+ * _lbfsize is -_bf._size, else _lbfsize is 0
+ * if _flags&__SRD, _w is 0
+ * if _flags&__SWR, _r is 0
+ *
+ * This ensures that the getc and putc macros (or inline functions) never
+ * try to write or read from a file that is in `read' or `write' mode.
+ * (Moreover, they can, and do, automatically switch from read mode to
+ * write mode, and back, on "r+" and "w+" files.)
+ *
+ * _lbfsize is used only to make the inline line-buffered output stream
+ * code as compact as possible.
+ *
+ * _ub, _up, and _ur are used when ungetc() pushes back more characters
+ * than fit in the current _bf, or when ungetc() pushes back a character
+ * that does not match the previous one in _bf. When this happens,
+ * _ub._base becomes non-nil (i.e., a stream has ungetc() data iff
+ * _ub._base!=NULL) and _up and _ur save the current values of _p and _r.
+ *
+ * NOTE: if you change this structure, you also need to update the
+ * std() initializer in findfp.c.
+ */
+typedef struct __sFILE {
+ unsigned char *_p; /* current position in (some) buffer */
+ int _r; /* read space left for getc() */
+ int _w; /* write space left for putc() */
+ short _flags; /* flags, below; this FILE is free if 0 */
+ short _file; /* fileno, if Unix descriptor, else -1 */
+ struct __sbuf _bf; /* the buffer (at least 1 byte, if !NULL) */
+ int _lbfsize; /* 0 or -_bf._size, for inline putc */
+
+ /* operations */
+ void *_cookie; /* cookie passed to io functions */
+ int (*_close)(void *);
+ int (*_read)(void *, char *, int);
+ fpos_t (*_seek)(void *, fpos_t, int);
+ int (*_write)(void *, const char *, int);
+
+ /* extension data, to avoid further ABI breakage */
+ struct __sbuf _ext;
+ /* data for long sequences of ungetc() */
+ unsigned char *_up; /* saved _p when _p is doing ungetc data */
+ int _ur; /* saved _r when _r is counting ungetc data */
+
+ /* tricks to meet minimum requirements even when malloc() fails */
+ unsigned char _ubuf[3]; /* guarantee an ungetc() buffer */
+ unsigned char _nbuf[1]; /* guarantee a getc() buffer */
+
+ /* separate buffer for fgetln() when line crosses buffer boundary */
+ struct __sbuf _lb; /* buffer for fgetln() */
+
+ /* Unix stdio files get aligned to block boundaries on fseek() */
+ int _blksize; /* stat.st_blksize (may be != _bf._size) */
+ fpos_t _offset; /* current lseek offset */
+} FILE;
+
+__BEGIN_DECLS
+extern FILE __sF[];
+__END_DECLS
+
+#define __SLBF 0x0001 /* line buffered */
+#define __SNBF 0x0002 /* unbuffered */
+#define __SRD 0x0004 /* OK to read */
+#define __SWR 0x0008 /* OK to write */
+ /* RD and WR are never simultaneously asserted */
+#define __SRW 0x0010 /* open for reading & writing */
+#define __SEOF 0x0020 /* found EOF */
+#define __SERR 0x0040 /* found error */
+#define __SMBF 0x0080 /* _buf is from malloc */
+#define __SAPP 0x0100 /* fdopen()ed in append mode */
+#define __SSTR 0x0200 /* this is an sprintf/snprintf string */
+#define __SOPT 0x0400 /* do fseek() optimisation */
+#define __SNPT 0x0800 /* do not do fseek() optimisation */
+#define __SOFF 0x1000 /* set iff _offset is in fact correct */
+#define __SMOD 0x2000 /* true => fgetln modified _p text */
+#define __SALC 0x4000 /* allocate string space dynamically */
+
+/*
+ * The following three definitions are for ANSI C, which took them
+ * from System V, which brilliantly took internal interface macros and
+ * made them official arguments to setvbuf(), without renaming them.
+ * Hence, these ugly _IOxxx names are *supposed* to appear in user code.
+ *
+ * Although numbered as their counterparts above, the implementation
+ * does not rely on this.
+ */
+#define _IOFBF 0 /* setvbuf should set fully buffered */
+#define _IOLBF 1 /* setvbuf should set line buffered */
+#define _IONBF 2 /* setvbuf should set unbuffered */
+
+#define BUFSIZ 1024 /* size of buffer used by setbuf */
+
+#define EOF (-1)
+
+/*
+ * FOPEN_MAX is a minimum maximum, and should be the number of descriptors
+ * that the kernel can provide without allocation of a resource that can
+ * fail without the process sleeping. Do not use this for anything.
+ */
+#define FOPEN_MAX 20 /* must be <= OPEN_MAX <sys/syslimits.h> */
+#define FILENAME_MAX 1024 /* must be <= PATH_MAX <sys/syslimits.h> */
+
+/* System V/ANSI C; this is the wrong way to do this, do *not* use these. */
+#if __BSD_VISIBLE || __XPG_VISIBLE
+#define P_tmpdir "/tmp/"
+#endif
+#define L_tmpnam 1024 /* XXX must be == PATH_MAX */
+#define TMP_MAX 308915776
+
+#ifndef SEEK_SET
+#define SEEK_SET 0 /* set file offset to offset */
+#endif
+#ifndef SEEK_CUR
+#define SEEK_CUR 1 /* set file offset to current plus offset */
+#endif
+#ifndef SEEK_END
+#define SEEK_END 2 /* set file offset to EOF plus offset */
+#endif
+
+#define stdin (&__sF[0])
+#define stdout (&__sF[1])
+#define stderr (&__sF[2])
+
+/*
+ * Functions defined in ANSI C standard.
+ */
+__BEGIN_DECLS
+void clearerr(FILE *);
+int fclose(FILE *);
+int feof(FILE *);
+int ferror(FILE *);
+int fflush(FILE *);
+int fgetc(FILE *);
+int fgetpos(FILE *, fpos_t *);
+char *fgets(char *, int, FILE *);
+FILE *fopen(const char *, const char *);
+int fprintf(FILE *, const char *, ...);
+int fputc(int, FILE *);
+int fputs(const char *, FILE *);
+size_t fread(void *, size_t, size_t, FILE *);
+FILE *freopen(const char *, const char *, FILE *);
+int fscanf(FILE *, const char *, ...);
+int fseek(FILE *, long, int);
+int fseeko(FILE *, off_t, int);
+int fsetpos(FILE *, const fpos_t *);
+long ftell(FILE *);
+off_t ftello(FILE *);
+size_t fwrite(const void *, size_t, size_t, FILE *);
+int getc(FILE *);
+int getchar(void);
+char *gets(char *);
+#if __BSD_VISIBLE && !defined(__SYS_ERRLIST)
+#define __SYS_ERRLIST
+
+extern int sys_nerr; /* perror(3) external variables */
+extern char *sys_errlist[];
+#endif
+void perror(const char *);
+int printf(const char *, ...);
+int putc(int, FILE *);
+int putchar(int);
+int puts(const char *);
+int remove(const char *);
+int rename(const char *, const char *);
+void rewind(FILE *);
+int scanf(const char *, ...);
+void setbuf(FILE *, char *);
+int setvbuf(FILE *, char *, int, size_t);
+int sprintf(char *, const char *, ...);
+int sscanf(const char *, const char *, ...);
+FILE *tmpfile(void);
+char *tmpnam(char *);
+int ungetc(int, FILE *);
+int vfprintf(FILE *, const char *, __gnuc_va_list);
+int vprintf(const char *, __gnuc_va_list);
+int vsprintf(char *, const char *, __gnuc_va_list);
+
+#if __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE
+int snprintf(char *, size_t, const char *, ...)
+ __attribute__((__format__ (printf, 3, 4)))
+ __attribute__((__nonnull__ (3)));
+int vfscanf(FILE *, const char *, __gnuc_va_list)
+ __attribute__((__format__ (scanf, 2, 0)))
+ __attribute__((__nonnull__ (2)));
+int vscanf(const char *, __gnuc_va_list)
+ __attribute__((__format__ (scanf, 1, 0)))
+ __attribute__((__nonnull__ (1)));
+int vsnprintf(char *, size_t, const char *, __gnuc_va_list)
+ __attribute__((__format__ (printf, 3, 0)))
+ __attribute__((__nonnull__ (3)));
+int vsscanf(const char *, const char *, __gnuc_va_list)
+ __attribute__((__format__ (scanf, 2, 0)))
+ __attribute__((__nonnull__ (2)));
+#endif /* __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE */
+
+__END_DECLS
+
+
+/*
+ * Functions defined in POSIX 1003.1.
+ */
+#if __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE
+#define L_ctermid 1024 /* size for ctermid(); PATH_MAX */
+#define L_cuserid 9 /* size for cuserid(); UT_NAMESIZE + 1 */
+
+__BEGIN_DECLS
+#if 0 /* MISSING FROM BIONIC */
+char *ctermid(char *);
+char *cuserid(char *);
+#endif /* MISSING */
+FILE *fdopen(int, const char *);
+int fileno(FILE *);
+
+#if (__POSIX_VISIBLE >= 199209)
+int pclose(FILE *);
+FILE *popen(const char *, const char *);
+#endif
+
+#if __POSIX_VISIBLE >= 199506
+void flockfile(FILE *);
+int ftrylockfile(FILE *);
+void funlockfile(FILE *);
+
+/*
+ * These are normally used through macros as defined below, but POSIX
+ * requires functions as well.
+ */
+int getc_unlocked(FILE *);
+int getchar_unlocked(void);
+int putc_unlocked(int, FILE *);
+int putchar_unlocked(int);
+#endif /* __POSIX_VISIBLE >= 199506 */
+
+#if __XPG_VISIBLE
+char *tempnam(const char *, const char *);
+#endif
+__END_DECLS
+
+#endif /* __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE */
+
+/*
+ * Routines that are purely local.
+ */
+#if __BSD_VISIBLE
+__BEGIN_DECLS
+int asprintf(char **, const char *, ...)
+ __attribute__((__format__ (printf, 2, 3)))
+ __attribute__((__nonnull__ (2)));
+char *fgetln(FILE *, size_t *);
+int fpurge(FILE *);
+int getw(FILE *);
+int putw(int, FILE *);
+void setbuffer(FILE *, char *, int);
+int setlinebuf(FILE *);
+int vasprintf(char **, const char *, __gnuc_va_list)
+ __attribute__((__format__ (printf, 2, 0)))
+ __attribute__((__nonnull__ (2)));
+__END_DECLS
+
+/*
+ * Stdio function-access interface.
+ */
+__BEGIN_DECLS
+FILE *funopen(const void *,
+ int (*)(void *, char *, int),
+ int (*)(void *, const char *, int),
+ fpos_t (*)(void *, fpos_t, int),
+ int (*)(void *));
+__END_DECLS
+#define fropen(cookie, fn) funopen(cookie, fn, 0, 0, 0)
+#define fwopen(cookie, fn) funopen(cookie, 0, fn, 0, 0)
+#endif /* __BSD_VISIBLE */
+
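/* Illustrative sketch, not part of the header: using the funopen()-based
   fwopen() macro above to direct stdio output into an in-memory buffer
   through a write callback.  'sink_write' and 'sink' are hypothetical.  */
#include <stdio.h>
#include <string.h>

static char sink[256];
static int sink_len;

static int
sink_write (void *cookie, const char *buf, int n)
{
  (void) cookie;
  if (sink_len + n > (int) sizeof sink)
    n = (int) sizeof sink - sink_len;
  memcpy (sink + sink_len, buf, n);
  sink_len += n;
  return n;
}

int
main (void)
{
  FILE *fp = fwopen (NULL, sink_write);  /* write-only cookie stream */
  if (fp)
    {
      fprintf (fp, "hello %d\n", 42);
      fclose (fp);                       /* flushes into sink[] */
    }
  return 0;
}
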
+/*
+ * Functions internal to the implementation.
+ */
+__BEGIN_DECLS
+int __srget(FILE *);
+int __swbuf(int, FILE *);
+__END_DECLS
+
+/*
+ * The __sfoo macros are here so that we can
+ * define function versions in the C library.
+ */
+#define __sgetc(p) (--(p)->_r < 0 ? __srget(p) : (int)(*(p)->_p++))
+#if defined(__GNUC__)
+static __inline int __sputc(int _c, FILE *_p) {
+ if (--_p->_w >= 0 || (_p->_w >= _p->_lbfsize && (char)_c != '\n'))
+ return (*_p->_p++ = _c);
+ else
+ return (__swbuf(_c, _p));
+}
+#else
+/*
+ * This has been tuned to generate reasonable code on the vax using pcc.
+ */
+#define __sputc(c, p) \
+ (--(p)->_w < 0 ? \
+ (p)->_w >= (p)->_lbfsize ? \
+ (*(p)->_p = (c)), *(p)->_p != '\n' ? \
+ (int)*(p)->_p++ : \
+ __swbuf('\n', p) : \
+ __swbuf((int)(c), p) : \
+ (*(p)->_p = (c), (int)*(p)->_p++))
+#endif
+
+#define __sfeof(p) (((p)->_flags & __SEOF) != 0)
+#define __sferror(p) (((p)->_flags & __SERR) != 0)
+#define __sclearerr(p) ((void)((p)->_flags &= ~(__SERR|__SEOF)))
+#define __sfileno(p) ((p)->_file)
+
+#define feof(p) __sfeof(p)
+#define ferror(p) __sferror(p)
+
+#ifndef _POSIX_THREADS
+#define clearerr(p) __sclearerr(p)
+#endif
+
+#if __POSIX_VISIBLE
+#define fileno(p) __sfileno(p)
+#endif
+
+#ifndef lint
+#ifndef _POSIX_THREADS
+#define getc(fp) __sgetc(fp)
+#endif /* _POSIX_THREADS */
+#define getc_unlocked(fp) __sgetc(fp)
+/*
+ * The macro implementations of putc and putc_unlocked are not
+ * fully POSIX compliant; they do not set errno on failure
+ */
+#if __BSD_VISIBLE
+#ifndef _POSIX_THREADS
+#define putc(x, fp) __sputc(x, fp)
+#endif /* _POSIX_THREADS */
+#define putc_unlocked(x, fp) __sputc(x, fp)
+#endif /* __BSD_VISIBLE */
+#endif /* lint */
+
+#define getchar() getc(stdin)
+#define putchar(x) putc(x, stdout)
+#define getchar_unlocked() getc_unlocked(stdin)
+#define putchar_unlocked(c) putc_unlocked(c, stdout)
+
+#ifdef _GNU_SOURCE
+/*
+ * glibc defines dprintf(int, const char*, ...), which is poorly named
+ * and likely to conflict with locally defined debugging printfs
+ * fdprintf is a better name, and some programs that use fdprintf use a
+ * #define fdprintf dprintf for compatibility
+ */
+int fdprintf(int, const char*, ...);
+int vfdprintf(int, const char*, __gnuc_va_list);
+#endif /* _GNU_SOURCE */
+
+#endif /* _STDIO_H_ */
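
/* Illustrative sketch, not part of the header: fdprintf(), declared in the
   _GNU_SOURCE block above, formats directly to a file descriptor -- here
   stderr (fd 2) -- without going through a FILE stream.  */
#define _GNU_SOURCE
#include <stdio.h>

int
main (void)
{
  fdprintf (2, "loaded %d modules\n", 3);
  return 0;
}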
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/syslimits.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/syslimits.h
new file mode 100644
index 0000000..a362802
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include-fixed/syslimits.h
@@ -0,0 +1,8 @@
+/* syslimits.h stands for the system's own limits.h file.
+ If we can use it ok unmodified, then we install this text.
+ If fixincludes fixes it, then the fixed version is installed
+ instead of this text. */
+
+#define _GCC_NEXT_LIMITS_H /* tell gcc's limits.h to recurse */
+#include_next <limits.h>
+#undef _GCC_NEXT_LIMITS_H
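
/* Illustrative sketch, not part of the header: the same #include_next
   layering trick, used by a hypothetical project-local "limits.h" placed
   in a directory that is searched before the system one.  */
#ifndef MYPROJ_LIMITS_WRAPPER_H
#define MYPROJ_LIMITS_WRAPPER_H

#include_next <limits.h>      /* continue with the next limits.h on the path */

#define MYPROJ_SMALL_BUF 128  /* project-specific addition layered on top */

#endif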
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_acle.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_acle.h
new file mode 100644
index 0000000..aaa7aff
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_acle.h
@@ -0,0 +1,100 @@
+/* ARM Non-NEON ACLE intrinsics include file.
+
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_ACLE_H
+#define _GCC_ARM_ACLE_H
+
+#include <stdint.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __ARM_FEATURE_CRC32
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32b (uint32_t __a, uint8_t __b)
+{
+ return __builtin_arm_crc32b (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32h (uint32_t __a, uint16_t __b)
+{
+ return __builtin_arm_crc32h (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32w (uint32_t __a, uint32_t __b)
+{
+ return __builtin_arm_crc32w (__a, __b);
+}
+
+#ifdef __ARM_32BIT_STATE
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32d (uint32_t __a, uint64_t __b)
+{
+ uint32_t __d;
+
+ __d = __crc32w (__crc32w (__a, __b & 0xffffffffULL), __b >> 32);
+ return __d;
+}
+#endif
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cb (uint32_t __a, uint8_t __b)
+{
+ return __builtin_arm_crc32cb (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32ch (uint32_t __a, uint16_t __b)
+{
+ return __builtin_arm_crc32ch (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cw (uint32_t __a, uint32_t __b)
+{
+ return __builtin_arm_crc32cw (__a, __b);
+}
+
+#ifdef __ARM_32BIT_STATE
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__crc32cd (uint32_t __a, uint64_t __b)
+{
+ uint32_t __d;
+
+ __d = __crc32cw (__crc32cw (__a, __b & 0xffffffffULL), __b >> 32);
+ return __d;
+}
+#endif
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
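
/* Illustrative sketch, not part of the header: accumulating a CRC-32 over a
   byte buffer with the __crc32b intrinsic above.  Assumes a target where
   __ARM_FEATURE_CRC32 is defined (e.g. built with -march=armv8-a+crc).  */
#include <arm_acle.h>
#include <stddef.h>
#include <stdint.h>

#ifdef __ARM_FEATURE_CRC32
static uint32_t
crc32_bytes (uint32_t crc, const uint8_t *p, size_t len)
{
  /* Feed one byte at a time into the hardware CRC32 instruction.  */
  while (len--)
    crc = __crc32b (crc, *p++);
  return crc;
}
#endif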
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_neon.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_neon.h
new file mode 100644
index 0000000..9573543
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/arm_neon.h
@@ -0,0 +1,13817 @@
+/* ARM NEON intrinsics include file.
+
+ Copyright (C) 2006-2014 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_ARM_NEON_H
+#define _GCC_ARM_NEON_H 1
+
+#ifndef __ARM_NEON__
+#error You must enable NEON instructions (e.g. -mfloat-abi=softfp -mfpu=neon) to use arm_neon.h
+#else
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_hi int16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_si int32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_di int64x1_t;
+typedef __builtin_neon_hf float16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_poly8 poly8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_poly16 poly16x4_t __attribute__ ((__vector_size__ (8)));
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64x1_t;
+#endif
+typedef __builtin_neon_uqi uint8x8_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_uhi uint16x4_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_usi uint32x2_t __attribute__ ((__vector_size__ (8)));
+typedef __builtin_neon_udi uint64x1_t;
+typedef __builtin_neon_qi int8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_hi int16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_si int32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_di int64x2_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_sf float32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_poly8 poly8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_poly16 poly16x8_t __attribute__ ((__vector_size__ (16)));
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64x2_t __attribute__ ((__vector_size__ (16)));
+#endif
+typedef __builtin_neon_uqi uint8x16_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_uhi uint16x8_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_usi uint32x4_t __attribute__ ((__vector_size__ (16)));
+typedef __builtin_neon_udi uint64x2_t __attribute__ ((__vector_size__ (16)));
+
+typedef float float32_t;
+typedef __builtin_neon_poly8 poly8_t;
+typedef __builtin_neon_poly16 poly16_t;
+#ifdef __ARM_FEATURE_CRYPTO
+typedef __builtin_neon_poly64 poly64_t;
+typedef __builtin_neon_poly128 poly128_t;
+#endif
+
+typedef struct int8x8x2_t
+{
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t
+{
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t
+{
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t
+{
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t
+{
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t
+{
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t
+{
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t
+{
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t
+{
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t
+{
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t
+{
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t
+{
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t
+{
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t
+{
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t
+{
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t
+{
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float32x2x2_t
+{
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t
+{
+ float32x4_t val[2];
+} float32x4x2_t;
+
+typedef struct poly8x8x2_t
+{
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t
+{
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t
+{
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t
+{
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x2_t
+{
+ poly64x1_t val[2];
+} poly64x1x2_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x2_t
+{
+ poly64x2_t val[2];
+} poly64x2x2_t;
+#endif
+
+
+typedef struct int8x8x3_t
+{
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t
+{
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t
+{
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t
+{
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t
+{
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t
+{
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t
+{
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t
+{
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t
+{
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t
+{
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t
+{
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t
+{
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t
+{
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t
+{
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t
+{
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t
+{
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float32x2x3_t
+{
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t
+{
+ float32x4_t val[3];
+} float32x4x3_t;
+
+typedef struct poly8x8x3_t
+{
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t
+{
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t
+{
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t
+{
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x3_t
+{
+ poly64x1_t val[3];
+} poly64x1x3_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x3_t
+{
+ poly64x2_t val[3];
+} poly64x2x3_t;
+#endif
+
+
+typedef struct int8x8x4_t
+{
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t
+{
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t
+{
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t
+{
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t
+{
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t
+{
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t
+{
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t
+{
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t
+{
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t
+{
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t
+{
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t
+{
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t
+{
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t
+{
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t
+{
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t
+{
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float32x2x4_t
+{
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t
+{
+ float32x4_t val[4];
+} float32x4x4_t;
+
+typedef struct poly8x8x4_t
+{
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t
+{
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t
+{
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t
+{
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x1x4_t
+{
+ poly64x1_t val[4];
+} poly64x1x4_t;
+#endif
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+typedef struct poly64x2x4_t
+{
+ poly64x2_t val[4];
+} poly64x2x4_t;
+#endif
+
+
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vaddv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vadddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vadddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddv2di (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vaddv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
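/* Illustrative sketch, not part of the header: element-wise addition of two
   two-lane float vectors with vadd_f32 above.  vdup_n_f32 and vget_lane_f32
   are other standard NEON intrinsics from this header; assumes a
   NEON-enabled build (e.g. -mfpu=neon -mfloat-abi=softfp).  */
#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  float32x2_t a = vdup_n_f32 (1.5f);    /* { 1.5, 1.5 }   */
  float32x2_t b = vdup_n_f32 (2.25f);   /* { 2.25, 2.25 } */
  float32x2_t c = vadd_f32 (a, b);      /* { 3.75, 3.75 } */
  printf ("%f %f\n", vget_lane_f32 (c, 0), vget_lane_f32 (c, 1));
  return 0;
}
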
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddlv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddlv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddlv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vaddw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vaddwv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vaddw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vaddwv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vaddw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vaddwv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vaddwv8qi ((int16x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vaddwv4hi ((int32x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vaddwv2si ((int64x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrhadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhaddv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrhadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhaddv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrhadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhaddv2si (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhaddv2si ((int32x2_t) __a, (int32x2_t) __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhaddv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhaddv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhaddv4si (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhaddv4si ((int32x4_t) __a, (int32x4_t) __b, 4);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqadd_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqadddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqadddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqaddv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqaddv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqaddv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqaddq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqaddv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqaddv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqaddv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqaddv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqaddv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
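+/* vaddhn_* and (below) vraddhn_*: add and narrow, keeping the high
+   half of each widened sum; the vr* forms round before narrowing.  */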
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vraddhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vaddhnv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vraddhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vaddhnv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vraddhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vaddhnv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vaddhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vaddhnv4si ((int32x4_t) __a, (int32x4_t) __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vaddhnv2di ((int64x2_t) __a, (int64x2_t) __b, 4);
+}
+
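+/* vmul_*: lane-wise multiply; the _p8 forms perform polynomial
+   (carry-less) multiplication.  */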
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmul_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vmulv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmulv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmulv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmulv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmul_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vmulv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmulv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmulv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmul_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vmulv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmulq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vmulv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmulv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmulv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmulv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vmulv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmulv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmulv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vmulv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+}
+
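+/* vqdmulh_* / vqrdmulh_*: saturating doubling multiply returning the
+   high half, without and with rounding respectively.  */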
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulhv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulhv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulhv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulhv4si (__a, __b, 5);
+}
+
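+/* vmull_* / vqdmull_*: widening multiplies producing double-width
+   lanes; vqdmull also doubles and saturates the product.  */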
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmull_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmullv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmullv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmullv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmull_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmullv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmullv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmullv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmull_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vmullv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmullv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmullv2si (__a, __b, 1);
+}
+
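+/* vmla_* families: multiply-accumulate, __a + __b * __c; the vmlal_*
+   forms widen the product, vqdmlal_* doubles and saturates it.  */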
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlav8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlav4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlav2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlav2sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlav16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlav8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlav4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlav4sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlalv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlalv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlalv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlalv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlalv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlalv2si (__a, __b, __c, 1);
+}
+
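+/* vmls_* families: multiply-subtract, __a - __b * __c; vmlsl_* widens
+   the product, vqdmlsl_* doubles and saturates it.  */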
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vmlsv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmlsv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmlsv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmlsv2sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vmlsv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmlsv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmlsv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vmlsv16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlsv8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsv4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmlsv4sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vmlsv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlsv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmlslv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlslv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlslv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmlslv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlslv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlslv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlslv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlslv2si (__a, __b, __c, 1);
+}
+
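+/* Fused multiply-accumulate/subtract with a single rounding step;
+   only available when the target provides __ARM_FEATURE_FMA.  */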
+#ifdef __ARM_FEATURE_FMA
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfma_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vfmav2sf (__a, __b, __c, 3);
+}
+
+#endif
+#ifdef __ARM_FEATURE_FMA
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vfmav4sf (__a, __b, __c, 3);
+}
+
+#endif
+#ifdef __ARM_FEATURE_FMA
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfms_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vfmsv2sf (__a, __b, __c, 3);
+}
+
+#endif
+#ifdef __ARM_FEATURE_FMA
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vfmsv4sf (__a, __b, __c, 3);
+}
+
+#endif
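+/* ARMv8 round-to-integral operations: vrndn (nearest, ties to even),
+   vrnda (ties away from zero), vrndp (toward +Inf), vrndm (toward
+   -Inf), vrnd (toward zero).  */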
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndn_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintnv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqn_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintnv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnda_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintav2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqa_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintav4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndp_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintpv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqp_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintpv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrndm_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintmv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndqm_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintmv4sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrnd_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrintzv2sf (__a);
+}
+
+#endif
+#if __ARM_ARCH >= 8
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrndq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrintzv4sf (__a);
+}
+
+#endif
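+/* vsub_* and the widening forms: vsubl_* subtracts and widens both
+   operands, vsubw_* subtracts a narrow vector from a wide one.  */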
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vsub_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vsubv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vsubdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vsubdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsubv2di (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsubq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vsubv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsublv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsublv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsublv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsublv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsublv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsublv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsubw_s8 (int16x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vsubwv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsubw_s16 (int32x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vsubwv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsubw_s32 (int64x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vsubwv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vsubwv8qi ((int16x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vsubwv4hi ((int32x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vsubwv2si ((int64x2_t) __a, (int32x2_t) __b, 0);
+}
+
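+/* vhsub_*: halving subtract -- each lane is (a - b) >> 1.  */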
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vhsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vhsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vhsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vhsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vhsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vhsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vhsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vhsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vhsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vhsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vhsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vhsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vhsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vhsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vhsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vhsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vhsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vhsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
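+/* vqsub_*: saturating subtract -- results are clamped instead of
+   wrapping on overflow.  */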
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqsub_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqsubv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqsub_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqsubv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqsub_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqsubv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqsub_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqsubdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqsubv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqsubv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqsubv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqsubdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqsubq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqsubv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqsubq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqsubv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqsubq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqsubv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqsubq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqsubv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqsubv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqsubv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqsubv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqsubv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
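+/* vsubhn_* / vrsubhn_*: subtract and narrow to the high half of each
+   lane, without and with rounding respectively.  */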
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vsubhnv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vsubhnv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vsubhnv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vsubhnv8hi ((int16x8_t) __a, (int16x8_t) __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vsubhnv4si ((int32x4_t) __a, (int32x4_t) __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vsubhnv2di ((int64x2_t) __a, (int64x2_t) __b, 4);
+}
+
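+/* Comparisons return all-ones in lanes where the predicate holds and
+   all-zeros elsewhere.  vceq_*: lane-wise equality.  */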
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vceqv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vceq_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vceqv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vceq_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vceqv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vceq_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vceqv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vceqv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vceqv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vceqv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vceqv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+}
+
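+/* vcge_*: greater than or equal; vcle_* reuses the same builtins with
+   the operands swapped.  */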
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcge_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcge_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcge_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgev8qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgev4hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgev2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcle_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgeuv8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcle_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgeuv4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcle_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgeuv2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgev16qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgev8hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgev4sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgeuv16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgeuv8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgeuv4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+}
+
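+/* vcgt_*: greater than; vclt_* reuses the same builtins with the
+   operands swapped.  */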
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtv8qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtv4hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtv2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclt_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vcgtuv8qi ((int8x8_t) __b, (int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclt_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vcgtuv4hi ((int16x4_t) __b, (int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclt_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcgtuv2si ((int32x2_t) __b, (int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtv16qi (__b, __a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtv8hi (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4si (__b, __a, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtv4sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcgtuv16qi ((int8x16_t) __b, (int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcgtuv8hi ((int16x8_t) __b, (int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcgtuv4si ((int32x4_t) __b, (int32x4_t) __a, 0);
+}
+
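+/* Absolute comparisons on float lane magnitudes: vcage/vcagt test
+   |a| >= |b| and |a| > |b|; vcale/vcalt swap the operands.  */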
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcage_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcageq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcale_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagev2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaleq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagev4sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcagt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcagtq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcalt_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vcagtv2sf (__b, __a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcaltq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcagtv4sf (__b, __a, 3);
+}
+
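+/* vtst_*: test bits -- lanes are all-ones where (a & b) is nonzero.  */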
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vtstv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vtstv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vtst_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vtstv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vtst_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vtstv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtst_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtstv8qi ((int8x8_t) __a, (int8x8_t) __b, 2);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vtstv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vtstv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vtstv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vtstv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vtstq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vtstv16qi ((int8x16_t) __a, (int8x16_t) __b, 2);
+}
+
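+/* vabd_*: absolute difference of lanes; vabdl_* widens the result.  */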
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vabdv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vabdv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vabdv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vabdv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vabd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vabdv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vabd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vabdv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vabd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vabdv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabdq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vabdv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabdq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vabdv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vabdv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabdv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabdv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabdl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vabdlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabdl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vabdlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabdl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vabdlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vabdlv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vabdlv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vabdlv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vabav8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vabav4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vabav2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vabav8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vabav4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vabav2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vabav16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabav8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabav4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vabav16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabav8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabav4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vabalv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vabalv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vabalv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vabalv8qi ((int16x8_t) __a, (int8x8_t) __b, (int8x8_t) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vabalv4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vabalv2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vmaxv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmaxv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmaxv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmaxv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vmaxv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmaxv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmaxv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmaxq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vmaxv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmaxq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmaxv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmaxq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmaxv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmaxq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmaxv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vmaxv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmaxv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmaxv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vminv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vminv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vminv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vminv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vminv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vminv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vminv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vminq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vminv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vminq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vminv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vminq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vminv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vminq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vminv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vminq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vminv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vminq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vminv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vminq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vminv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpadd_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpaddv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadd_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpaddv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadd_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpaddv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpaddv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpaddv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpaddv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpaddv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpaddl_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vpaddlv8qi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpaddl_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vpaddlv4hi (__a, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpaddl_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vpaddlv2si (__a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpaddl_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vpaddlv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpaddl_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vpaddlv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpaddl_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vpaddlv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpaddlq_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vpaddlv16qi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpaddlq_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vpaddlv8hi (__a, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpaddlq_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vpaddlv4si (__a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpaddlq_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vpaddlv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpaddlq_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vpaddlv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpaddlq_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vpaddlv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpadal_s8 (int16x4_t __a, int8x8_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpadalv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpadal_s16 (int32x2_t __a, int16x4_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpadalv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vpadal_s32 (int64x1_t __a, int32x2_t __b)
+{
+ return (int64x1_t)__builtin_neon_vpadalv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpadal_u8 (uint16x4_t __a, uint8x8_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpadalv8qi ((int16x4_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpadal_u16 (uint32x2_t __a, uint16x4_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpadalv4hi ((int32x2_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vpadal_u32 (uint64x1_t __a, uint32x2_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vpadalv2si ((int64x1_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vpadalq_s8 (int16x8_t __a, int8x16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vpadalv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vpadalq_s16 (int32x4_t __a, int16x8_t __b)
+{
+ return (int32x4_t)__builtin_neon_vpadalv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vpadalq_s32 (int64x2_t __a, int32x4_t __b)
+{
+ return (int64x2_t)__builtin_neon_vpadalv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vpadalq_u8 (uint16x8_t __a, uint8x16_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vpadalv16qi ((int16x8_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vpadalq_u16 (uint32x4_t __a, uint16x8_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vpadalv8hi ((int32x4_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vpadalq_u32 (uint64x2_t __a, uint32x4_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vpadalv4si ((int64x2_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmax_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpmaxv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmax_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpmaxv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmax_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpmaxv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmax_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpmaxv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmax_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpmaxv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmax_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpmaxv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmax_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpmaxv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vpmin_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vpminv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vpmin_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vpminv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vpmin_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vpminv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpmin_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vpminv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vpmin_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vpminv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vpmin_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vpminv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vpmin_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vpminv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecps_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrecpsv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrecpsv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrts_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtsv2sf (__a, __b, 3);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtsv4sf (__a, __b, 3);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vshlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vshlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vshlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vshldi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vshlv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vshlv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vshlv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vshldi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vshlv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vshlv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vshlv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vshlv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vshlv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vshlv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vshlv2di ((int64x2_t) __a, __b, 0);
+}
+
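+/* The rounding forms (vrshl here, and vqrshl, vrshr_n, vrshrn_n, vqrshrn_n
+   further down) have no separate builtins; they pass the same selector
+   plus 4 to request rounding (5 = signed rounding, 4 = unsigned rounding).  */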
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vshlv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vshlv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vshlv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vshldi (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vshlv8qi ((int8x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vshlv4hi ((int16x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vshlv2si ((int32x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vshldi ((int64x1_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vshlv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vshlv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vshlv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vshlv16qi ((int8x16_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vshlv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vshlv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vshlv2di ((int64x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqshlv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqshldi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshldi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqshlv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqshlv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqshlv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlv2di ((int64x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshl_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshl_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqshlv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshl_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqrshl_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vqshldi (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlv8qi ((int8x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlv4hi ((int16x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlv2si ((int32x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshldi ((int64x1_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqshlv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqshlv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqshlv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlv16qi ((int8x16_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlv2di ((int64x2_t) __a, __b, 4);
+}
+
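+/* The _n forms below shift by an immediate: __b must be a compile-time
+   constant, and for these right shifts it must lie between 1 and the
+   element width in bits.  */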
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshr_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshr_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshr_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshr_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshr_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshr_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshr_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshr_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshr_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshr_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshr_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshr_nv2di ((int64x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshr_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshr_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshr_nv4hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshr_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrshr_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshr_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshr_nv8qi ((int8x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshr_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshr_nv4hi ((int16x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshr_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshr_nv2si ((int32x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrshr_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshr_ndi ((int64x1_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshr_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshr_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshr_nv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrshrq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshr_nv16qi ((int8x16_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrshrq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshr_nv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrshrq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshr_nv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrshrq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshr_nv2di ((int64x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshrn_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshrn_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshrn_nv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshrn_nv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshrn_nv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshrn_nv2di ((int64x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshrn_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshrn_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshrn_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrn_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrn_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrn_nv2di ((int64x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshrn_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshrn_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshrn_nv2di (__a, __b, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrn_nv8hi ((int16x8_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrn_nv4si ((int32x4_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrn_nv2di ((int64x2_t) __a, __b, 4);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqrshrun_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshrun_nv8hi (__a, __b, 5);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqrshrun_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshrun_nv4si (__a, __b, 5);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqrshrun_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshrun_nv2di (__a, __b, 5);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vshl_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vshl_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vshl_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vshl_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vshl_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vshl_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vshl_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vshl_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vshl_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshl_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshl_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshl_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vshl_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshl_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshl_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshl_nv2di ((int64x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqshl_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vqshl_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqshl_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vqshl_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqshl_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vqshl_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vqshl_n_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vqshl_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshl_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshl_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshl_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshl_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshl_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshl_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshl_n_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshl_ndi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vqshl_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vqshl_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vqshl_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vqshl_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshlq_n_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshl_nv16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshlq_n_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshl_nv8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshlq_n_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshl_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshlq_n_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshl_nv2di ((int64x2_t) __a, __b, 0);
+}
+
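+/* vqshlu_n: signed input, unsigned saturated result (VQSHLU).  */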
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqshlu_n_s8 (int8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vqshlu_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqshlu_n_s16 (int16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vqshlu_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqshlu_n_s32 (int32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vqshlu_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vqshlu_n_s64 (int64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vqshlu_ndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vqshluq_n_s8 (int8x16_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vqshlu_nv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vqshluq_n_s16 (int16x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vqshlu_nv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vqshluq_n_s32 (int32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vqshlu_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vqshluq_n_s64 (int64x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vqshlu_nv2di (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vshll_n_s8 (int8x8_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vshll_nv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vshll_n_s16 (int16x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vshll_nv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vshll_n_s32 (int32x2_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vshll_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vshll_n_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vshll_nv8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vshll_n_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vshll_nv4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vshll_n_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vshll_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsra_nv4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsra_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsra_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsra_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsra_ndi ((int64x1_t) __a, (int64x1_t) __b, __c, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsra_nv8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsra_nv4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsra_nv2di (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsra_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsra_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsra_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 0);
+}
+
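+/* Editor's note (illustrative sketch, not part of the committed header): the
+   vsra_n_*/vsraq_n_* intrinsics above shift each lane of __b right by the
+   constant __c and accumulate into __a, i.e. per lane __a + (__b >> __c):
+
+     uint8x8_t acc = vdup_n_u8 (0);
+     uint8x8_t x   = vdup_n_u8 (200);
+     acc = vsra_n_u8 (acc, x, 2);   // each lane: 0 + (200 >> 2) = 50
+*/
+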
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsra_nv4hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsra_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsra_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsra_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsra_ndi ((int64x1_t) __a, (int64x1_t) __b, __c, 4);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsra_nv8hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsra_nv4si (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsra_nv2di (__a, __b, __c, 5);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsra_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsra_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsra_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c, 4);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsra_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c, 4);
+}
+
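+/* Editor's note (illustrative sketch, not part of the committed header):
+   vrsra_n_*/vrsraq_n_* behave like vsra_n_* but round the shifted value by
+   adding 1 << (__c - 1) before the right shift; note the same builtin is
+   used and only the final magic argument selects the rounding form:
+
+     uint8x8_t acc = vdup_n_u8 (0);
+     uint8x8_t x   = vdup_n_u8 (3);
+     acc = vrsra_n_u8 (acc, x, 1);   // (3 + 1) >> 1 = 2, vs. vsra_n_u8 giving 1
+*/
+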
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vsri_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsri_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsri_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsri_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsri_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsri_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vsri_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsri_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsri_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vsriq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsri_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsri_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsri_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsri_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsri_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsri_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsriq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsri_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsri_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
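+/* Editor's note (illustrative sketch, not part of the committed header):
+   vsri_n_*/vsriq_n_* shift each lane of __b right by __c and insert the
+   result into __a, leaving the top __c bits of each lane of __a untouched:
+
+     uint8x8_t a = vdup_n_u8 (0xF0);
+     uint8x8_t b = vdup_n_u8 (0xFF);
+     uint8x8_t r = vsri_n_u8 (a, b, 4);   // each lane: 0xF0 | (0xFF >> 4) = 0xFF
+*/
+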
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vsli_nv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vsli_nv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vsli_nv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vsli_nv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vsli_ndi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vsli_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vsli_nv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vsli_nv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vsli_nv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vsli_nv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vsli_nv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vsli_nv2di (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vsli_nv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vsli_nv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsliq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vsli_nv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsliq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vsli_nv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
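+/* Editor's note (illustrative sketch, not part of the committed header):
+   vsli_n_*/vsliq_n_* are the mirror image of vsri_n_*: each lane of __b is
+   shifted left by __c and inserted into __a, preserving the low __c bits of
+   each lane of __a:
+
+     uint8x8_t a = vdup_n_u8 (0x01);
+     uint8x8_t b = vdup_n_u8 (0x0F);
+     uint8x8_t r = vsli_n_u8 (a, b, 4);   // each lane: (0x0F << 4) | 0x01 = 0xF1
+*/
+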
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vabsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vabsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vabsv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabs_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vabsv2sf (__a, 3);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vabsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vabsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vabsv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabsq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vabsv4sf (__a, 3);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqabs_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqabsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqabs_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqabsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqabs_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqabsv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqabsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqabsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqabsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqabsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqabsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqabsv4si (__a, 1);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vnegv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vnegv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vnegv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vneg_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vnegv2sf (__a, 3);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vnegv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vnegv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vnegv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vnegq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vnegv4sf (__a, 3);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqneg_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqnegv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqneg_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqnegv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqneg_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqnegv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vqnegq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vqnegv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqnegq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vqnegv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqnegq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vqnegv4si (__a, 1);
+}
+
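+/* Editor's note (illustrative sketch, not part of the committed header): the
+   vqabs_* and vqneg_* forms above saturate where plain vabs_*/vneg_* would
+   wrap on the most negative value:
+
+     int8x8_t v = vdup_n_s8 (-128);
+     int8x8_t a = vqabs_s8 (v);   // 127 (saturated); vabs_s8 would wrap to -128
+*/
+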
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmvn_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vmvnv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmvn_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vmvnv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmvn_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vmvnv2si (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmvn_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmvn_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vmvnv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmvn_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vmvnv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmvn_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vmvnv8qi ((int8x8_t) __a, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmvnq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vmvnv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmvnq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vmvnv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmvnq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vmvnv4si (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmvnq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmvnq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vmvnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmvnq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vmvnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmvnq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vmvnv16qi ((int8x16_t) __a, 2);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcls_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclsv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcls_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclsv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcls_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclsv2si (__a, 1);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclsq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclsv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclsq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclsv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclsq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclsv4si (__a, 1);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vclz_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vclzv8qi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vclz_s16 (int16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vclzv4hi (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vclz_s32 (int32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vclzv2si (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vclz_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vclzv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vclz_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vclzv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vclz_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vclzv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vclzq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vclzv16qi (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vclzq_s16 (int16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vclzv8hi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vclzq_s32 (int32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vclzv4si (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vclzq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vclzv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vclzq_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vclzv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vclzq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vclzv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcnt_s8 (int8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vcntv8qi (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcnt_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcnt_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vcntv8qi ((int8x8_t) __a, 2);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcntq_s8 (int8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vcntv16qi (__a, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcntq_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 0);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcntq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vcntv16qi ((int8x16_t) __a, 2);
+}
+
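+/* Editor's note (illustrative sketch, not part of the committed header): of
+   the bit-counting intrinsics above, vcls_* counts the consecutive bits that
+   match the sign bit (excluding the sign bit itself), vclz_* counts leading
+   zeros, and vcnt_* is a per-byte population count:
+
+     uint8x8_t v = vdup_n_u8 (0x0F);
+     uint8x8_t z = vclz_u8 (v);   // 4 leading zeros per lane
+     uint8x8_t c = vcnt_u8 (v);   // 4 set bits per lane
+*/
+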
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrecpe_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrecpev2sf (__a, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrecpe_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrecpev2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrecpeq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrecpev4sf (__a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrecpeq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrecpev4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrsqrte_f32 (float32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vrsqrtev2sf (__a, 3);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrsqrte_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vrsqrtev2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_f32 (float32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vrsqrtev4sf (__a, 3);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrsqrteq_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vrsqrtev4si ((int32x4_t) __a, 0);
+}
+
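+/* Editor's note (illustrative sketch, not part of the committed header):
+   vrecpe_* and vrsqrte_* return only a low-precision estimate of 1/x and
+   1/sqrt(x); in practice the estimate is refined with one or two
+   Newton-Raphson steps using vrecps_f32 / vrsqrts_f32:
+
+     float32x2_t x = vdup_n_f32 (4.0f);
+     float32x2_t e = vrecpe_f32 (x);
+     e = vmul_f32 (vrecps_f32 (x, e), e);   // one refinement step toward 0.25
+*/
+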
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vget_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vget_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vget_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vget_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev2sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vget_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vget_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vget_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_lanev2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vget_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_lanev8qi ((int8x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vget_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_lanev4hi ((int16x4_t) __a, __b, 2);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vget_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanedi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vget_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanedi ((int64x1_t) __a, __b, 0);
+}
+
+__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+vgetq_lane_s8 (int8x16_t __a, const int __b)
+{
+ return (int8_t)__builtin_neon_vget_lanev16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vgetq_lane_s16 (int16x8_t __a, const int __b)
+{
+ return (int16_t)__builtin_neon_vget_lanev8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vgetq_lane_s32 (int32x4_t __a, const int __b)
+{
+ return (int32_t)__builtin_neon_vget_lanev4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vgetq_lane_f32 (float32x4_t __a, const int __b)
+{
+ return (float32_t)__builtin_neon_vget_lanev4sf (__a, __b, 3);
+}
+
+__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+vgetq_lane_u8 (uint8x16_t __a, const int __b)
+{
+ return (uint8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vgetq_lane_u16 (uint16x8_t __a, const int __b)
+{
+ return (uint16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vgetq_lane_u32 (uint32x4_t __a, const int __b)
+{
+ return (uint32_t)__builtin_neon_vget_lanev4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+vgetq_lane_p8 (poly8x16_t __a, const int __b)
+{
+ return (poly8_t)__builtin_neon_vget_lanev16qi ((int8x16_t) __a, __b, 2);
+}
+
+__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+vgetq_lane_p16 (poly16x8_t __a, const int __b)
+{
+ return (poly16_t)__builtin_neon_vget_lanev8hi ((int16x8_t) __a, __b, 2);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vgetq_lane_s64 (int64x2_t __a, const int __b)
+{
+ return (int64_t)__builtin_neon_vget_lanev2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vgetq_lane_u64 (uint64x2_t __a, const int __b)
+{
+ return (uint64_t)__builtin_neon_vget_lanev2di ((int64x2_t) __a, __b, 0);
+}
+
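+/* Editor's note (illustrative sketch, not part of the committed header): the
+   lane index passed to vget_lane_*/vgetq_lane_* must be a compile-time
+   constant in range for the vector width:
+
+     int32x2_t v = vdup_n_s32 (7);
+     int32_t   s = vget_lane_s32 (v, 1);   // 7; the index must be 0 or 1 here
+*/
+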
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vset_lane_s8 (int8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vset_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vset_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vset_lane_f32 (float32_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vset_lanev2sf ((__builtin_neon_sf) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vset_lane_u8 (uint8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vset_lane_u16 (uint16_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vset_lane_u32 (uint32_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vset_lane_p8 (poly8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vset_lanev8qi ((__builtin_neon_qi) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vset_lane_p16 (poly16_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vset_lanev4hi ((__builtin_neon_hi) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vset_lane_s64 (int64_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vset_lane_u64 (uint64_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vset_lanedi ((__builtin_neon_di) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_s8 (int8_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_s16 (int16_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_s32 (int32_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_f32 (float32_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vset_lanev4sf ((__builtin_neon_sf) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_u8 (uint8_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_u16 (uint16_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_u32 (uint32_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vset_lanev4si ((__builtin_neon_si) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_p8 (poly8_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vset_lanev16qi ((__builtin_neon_qi) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_p16 (poly16_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vset_lanev8hi ((__builtin_neon_hi) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_s64 (int64_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_u64 (uint64_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vset_lanev2di ((__builtin_neon_di) __a, (int64x2_t) __b, __c);
+}
+
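+/* Editor's note (illustrative sketch, not part of the committed header):
+   vset_lane_*/vsetq_lane_* return a copy of __b with one lane replaced by
+   the scalar __a; the original vector is not modified in place:
+
+     float32x2_t v = vdup_n_f32 (0.0f);
+     v = vset_lane_f32 (1.5f, v, 0);   // lane 0 = 1.5f, lane 1 still 0.0f
+*/
+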
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vcreate_p64 (uint64_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vcreate_s8 (uint64_t __a)
+{
+ return (int8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vcreate_s16 (uint64_t __a)
+{
+ return (int16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcreate_s32 (uint64_t __a)
+{
+ return (int32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vcreate_s64 (uint64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcreate_f32 (uint64_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcreatev2sf ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vcreate_u8 (uint64_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vcreate_u16 (uint64_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcreate_u32 (uint64_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vcreatev2si ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vcreate_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vcreate_p8 (uint64_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vcreatev8qi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vcreate_p16 (uint64_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vcreatev4hi ((__builtin_neon_di) __a);
+}
+
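+/* Editor's note (illustrative sketch, not part of the committed header): the
+   vcreate_* intrinsics above reinterpret a 64-bit scalar bit pattern as a
+   64-bit vector, with element 0 taking the least significant bits:
+
+     uint8x8_t v = vcreate_u8 (0x0807060504030201ULL);  // lane values 1..8
+*/
+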
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_n_s8 (int8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_n_s16 (int16_t __a)
+{
+ return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_n_s32 (int32_t __a)
+{
+ return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_n_f32 (float32_t __a)
+{
+ return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vdup_n_p64 (poly64_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_n_s64 (int64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_n_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vdupq_n_p64 (poly64_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_n_s8 (int8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_n_s16 (int16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_n_s32 (int32_t __a)
+{
+ return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_n_f32 (float32_t __a)
+{
+ return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_n_u8 (uint8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_n_u16 (uint16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_n_p8 (poly8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_n_p16 (poly16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_n_s64 (int64_t __a)
+{
+ return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmov_n_s8 (int8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmov_n_s16 (int16_t __a)
+{
+ return (int16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmov_n_s32 (int32_t __a)
+{
+ return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmov_n_f32 (float32_t __a)
+{
+ return (float32x2_t)__builtin_neon_vdup_nv2sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmov_n_u8 (uint8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmov_n_u16 (uint16_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmov_n_u32 (uint32_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmov_n_p8 (poly8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vdup_nv8qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vmov_n_p16 (poly16_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vdup_nv4hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vmov_n_s64 (int64_t __a)
+{
+ return (int64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vmov_n_u64 (uint64_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vdup_ndi ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmovq_n_s8 (int8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovq_n_s16 (int16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovq_n_s32 (int32_t __a)
+{
+ return (int32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmovq_n_f32 (float32_t __a)
+{
+ return (float32x4_t)__builtin_neon_vdup_nv4sf ((__builtin_neon_sf) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmovq_n_u8 (uint8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovq_n_u16 (uint16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovq_n_u32 (uint32_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vdup_nv4si ((__builtin_neon_si) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmovq_n_p8 (poly8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vdup_nv16qi ((__builtin_neon_qi) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vmovq_n_p16 (poly16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vdup_nv8hi ((__builtin_neon_hi) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovq_n_s64 (int64_t __a)
+{
+ return (int64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovq_n_u64 (uint64_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vdup_nv2di ((__builtin_neon_di) __a);
+}
+
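+/* Editor's note (illustrative sketch, not part of the committed header): as
+   the definitions above show, vmov_n_*/vmovq_n_* are aliases of
+   vdup_n_*/vdupq_n_*; both broadcast a scalar into every lane:
+
+     uint16x8_t ones = vdupq_n_u16 (1);   // { 1, 1, 1, 1, 1, 1, 1, 1 }
+*/
+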
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vdup_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x8_t)__builtin_neon_vdup_lanev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vdup_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x4_t)__builtin_neon_vdup_lanev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vdup_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vdup_lanev2si (__a, __b);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vdup_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vdup_lanev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vdup_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vdup_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vdup_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vdup_lanev2si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vdup_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x8_t)__builtin_neon_vdup_lanev8qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vdup_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x4_t)__builtin_neon_vdup_lanev4hi ((int16x4_t) __a, __b);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vdup_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vdup_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x1_t)__builtin_neon_vdup_lanedi (__a, __b);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vdup_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x1_t)__builtin_neon_vdup_lanedi ((int64x1_t) __a, __b);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_s8 (int8x8_t __a, const int __b)
+{
+ return (int8x16_t)__builtin_neon_vdup_lanev16qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_s16 (int16x4_t __a, const int __b)
+{
+ return (int16x8_t)__builtin_neon_vdup_lanev8hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_s32 (int32x2_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vdup_lanev4si (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_f32 (float32x2_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vdup_lanev4sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_u8 (uint8x8_t __a, const int __b)
+{
+ return (uint8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_u16 (uint16x4_t __a, const int __b)
+{
+ return (uint16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vdupq_lane_u32 (uint32x2_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vdup_lanev4si ((int32x2_t) __a, __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vdupq_lane_p8 (poly8x8_t __a, const int __b)
+{
+ return (poly8x16_t)__builtin_neon_vdup_lanev16qi ((int8x8_t) __a, __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vdupq_lane_p16 (poly16x4_t __a, const int __b)
+{
+ return (poly16x8_t)__builtin_neon_vdup_lanev8hi ((int16x4_t) __a, __b);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return (poly64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_s64 (int64x1_t __a, const int __b)
+{
+ return (int64x2_t)__builtin_neon_vdup_lanev2di (__a, __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vdupq_lane_u64 (uint64x1_t __a, const int __b)
+{
+ return (uint64x2_t)__builtin_neon_vdup_lanev2di ((int64x1_t) __a, __b);
+}
+
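+/* Editor's note (illustrative sketch, not part of the committed header):
+   vdup_lane_*/vdupq_lane_* broadcast one lane of a 64-bit source vector
+   across the whole result:
+
+     int16x4_t coeffs = vdup_n_s16 (9);
+     int16x8_t bcast  = vdupq_lane_s16 (coeffs, 2);   // all eight lanes = 9
+*/
+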
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ return (poly64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vcombine_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x16_t)__builtin_neon_vcombinev8qi (__a, __b);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vcombine_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x8_t)__builtin_neon_vcombinev4hi (__a, __b);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcombine_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x4_t)__builtin_neon_vcombinev2si (__a, __b);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vcombine_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x2_t)__builtin_neon_vcombinedi (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcombine_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return (float32x4_t)__builtin_neon_vcombinev2sf (__a, __b);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vcombinev2si ((int32x2_t) __a, (int32x2_t) __b);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vcombinedi ((int64x1_t) __a, (int64x1_t) __b);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ return (poly8x16_t)__builtin_neon_vcombinev8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ return (poly16x8_t)__builtin_neon_vcombinev4hi ((int16x4_t) __a, (int16x4_t) __b);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vget_high_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_high_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_highv16qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_high_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_highv8hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_high_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_highv4si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_high_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_highv2di (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_high_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_highv4sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_high_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_high_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_high_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_highv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_high_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_highv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_high_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_highv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_high_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_highv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vget_low_s8 (int8x16_t __a)
+{
+ return (int8x8_t)__builtin_neon_vget_lowv16qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vget_low_s16 (int16x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vget_lowv8hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vget_low_s32 (int32x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vget_lowv4si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vget_low_f32 (float32x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vget_lowv4sf (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vget_low_u8 (uint8x16_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vget_low_u16 (uint16x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vget_low_u32 (uint32x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vget_lowv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vget_low_p8 (poly8x16_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vget_lowv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vget_low_p16 (poly16x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vget_lowv8hi ((int16x8_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vget_low_p64 (poly64x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vget_low_s64 (int64x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vget_lowv2di (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vget_low_u64 (uint64x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vget_lowv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vcvtv2sf (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtv2si (__a, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vcvtv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vcvtv2sf (__a, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vcvtv4sf (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4si (__a, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vcvtv4sf (__a, 0);
+}
+
+#if ((__ARM_FP & 0x2) != 0)
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vcvt_f16_f32 (float32x4_t __a)
+{
+ return (float16x4_t)__builtin_neon_vcvtv4hfv4sf (__a);
+}
+
+#endif
+#if ((__ARM_FP & 0x2) != 0)
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvt_f32_f16 (float16x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vcvtv4sfv4hf (__a);
+}
+
+#endif
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vcvt_n_s32_f32 (float32x2_t __a, const int __b)
+{
+ return (int32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_n_f32_s32 (int32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvt_nv2si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
+{
+ return (float32x2_t)__builtin_neon_vcvt_nv2si ((int32x2_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vcvt_n_u32_f32 (float32x2_t __a, const int __b)
+{
+ return (uint32x2_t)__builtin_neon_vcvt_nv2sf (__a, __b, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
+{
+ return (int32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvt_nv4si (__a, __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
+{
+ return (float32x4_t)__builtin_neon_vcvt_nv4si ((int32x4_t) __a, __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
+{
+ return (uint32x4_t)__builtin_neon_vcvt_nv4sf (__a, __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vmovnv8hi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vmovnv4si (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vmovnv2di (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vmovnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vmovnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vmovnv2di ((int64x2_t) __a, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vqmovn_s16 (int16x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vqmovnv8hi (__a, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqmovn_s32 (int32x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vqmovnv4si (__a, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqmovn_s64 (int64x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vqmovnv2di (__a, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovn_u16 (uint16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovnv8hi ((int16x8_t) __a, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovn_u32 (uint32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovnv4si ((int32x4_t) __a, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovn_u64 (uint64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovnv2di ((int64x2_t) __a, 0);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vqmovun_s16 (int16x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vqmovunv8hi (__a, 1);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vqmovun_s32 (int32x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vqmovunv4si (__a, 1);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vqmovun_s64 (int64x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vqmovunv2di (__a, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmovl_s8 (int8x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vmovlv8qi (__a, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmovl_s16 (int16x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vmovlv4hi (__a, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmovl_s32 (int32x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vmovlv2si (__a, 1);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmovl_u8 (uint8x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vmovlv8qi ((int8x8_t) __a, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmovl_u16 (uint16x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vmovlv4hi ((int16x4_t) __a, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmovl_u32 (uint32x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vmovlv2si ((int32x2_t) __a, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl1_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vtbl1v8qi (__a, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl1_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl1_p8 (poly8x8_t __a, uint8x8_t __b)
+{
+ return (poly8x8_t)__builtin_neon_vtbl1v8qi ((int8x8_t) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl2_s8 (int8x8x2_t __a, int8x8_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl2_u8 (uint8x8x2_t __a, uint8x8_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl2_p8 (poly8x8x2_t __a, uint8x8_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl2v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl3_s8 (int8x8x3_t __a, int8x8_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl3_u8 (uint8x8x3_t __a, uint8x8_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl3_p8 (poly8x8x3_t __a, uint8x8_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl3v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbl4_s8 (int8x8x4_t __a, int8x8_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (int8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, __b);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbl4_u8 (uint8x8x4_t __a, uint8x8_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (uint8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbl4_p8 (poly8x8x4_t __a, uint8x8_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __au = { __a };
+ return (poly8x8_t)__builtin_neon_vtbl4v8qi (__au.__o, (int8x8_t) __b);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx1_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vtbx1v8qi (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx1_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx1_p8 (poly8x8_t __a, poly8x8_t __b, uint8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vtbx1v8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx2_s8 (int8x8_t __a, int8x8x2_t __b, int8x8_t __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx2v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx2_u8 (uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx2_p8 (poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx2v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx3_s8 (int8x8_t __a, int8x8x3_t __b, int8x8_t __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx3v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx3_u8 (uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx3_p8 (poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx3v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vtbx4_s8 (int8x8_t __a, int8x8x4_t __b, int8x8_t __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (int8x8_t)__builtin_neon_vtbx4v8qi (__a, __bu.__o, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vtbx4_u8 (uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (uint8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vtbx4_p8 (poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ return (poly8x8_t)__builtin_neon_vtbx4v8qi ((int8x8_t) __a, __bu.__o, (int8x8_t) __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vmul_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vmul_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vmul_lanev2sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vmul_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vmul_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vmul_lanev8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmul_lanev4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vmul_lanev4sf (__a, __b, __c, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vmul_lanev8hi ((int16x8_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmul_lanev4si ((int32x4_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmla_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmla_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmla_lanev2sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmla_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmla_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmla_lanev8hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmla_lanev4si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmla_lanev4sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmla_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmla_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlal_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlal_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlal_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlal_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x4_t)__builtin_neon_vmls_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x2_t)__builtin_neon_vmls_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_lane_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x2_t)__builtin_neon_vmls_lanev2sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x4_t)__builtin_neon_vmls_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x2_t)__builtin_neon_vmls_lanev2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
+{
+ return (int16x8_t)__builtin_neon_vmls_lanev8hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmls_lanev4si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b, float32x2_t __c, const int __d)
+{
+ return (float32x4_t)__builtin_neon_vmls_lanev4sf (__a, __b, __c, __d, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint16x8_t)__builtin_neon_vmls_lanev8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmls_lanev4si ((int32x4_t) __a, (int32x4_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vmlsl_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vmlsl_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
+{
+ return (uint32x4_t)__builtin_neon_vmlsl_lanev4hi ((int32x4_t) __a, (int16x4_t) __b, (int16x4_t) __c, __d, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
+{
+ return (uint64x2_t)__builtin_neon_vmlsl_lanev2si ((int64x2_t) __a, (int32x2_t) __b, (int32x2_t) __c, __d, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si (__a, __b, __c, __d, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vmull_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vmull_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vmull_lanev4hi ((int16x4_t) __a, (int16x4_t) __b, __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vmull_lanev2si ((int32x2_t) __a, (int32x2_t) __b, __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_lanev4si (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_lanev2si (__a, __b, __c, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vmul_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vmul_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t __a, float32_t __b)
+{
+ return (float32x2_t)__builtin_neon_vmul_nv2sf (__a, (__builtin_neon_sf) __b, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vmul_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vmul_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vmul_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmul_nv4si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return (float32x4_t)__builtin_neon_vmul_nv4sf (__a, (__builtin_neon_sf) __b, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vmul_nv8hi ((int16x8_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmul_nv4si ((int32x4_t) __a, (__builtin_neon_si) __b, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmull_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vmull_nv4hi ((int16x4_t) __a, (__builtin_neon_hi) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmull_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vmull_nv2si ((int32x2_t) __a, (__builtin_neon_si) __b, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmull_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmull_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmull_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int64x2_t)__builtin_neon_vqdmull_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return (int16x8_t)__builtin_neon_vqdmulh_nv8hi (__a, (__builtin_neon_hi) __b, 5);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return (int32x4_t)__builtin_neon_vqdmulh_nv4si (__a, (__builtin_neon_si) __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return (int16x4_t)__builtin_neon_vqdmulh_nv4hi (__a, (__builtin_neon_hi) __b, 5);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return (int32x2_t)__builtin_neon_vqdmulh_nv2si (__a, (__builtin_neon_si) __b, 5);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmla_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmla_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmla_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmla_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmla_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmla_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmla_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmla_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmla_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmla_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlal_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlal_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlal_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlal_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int16x4_t)__builtin_neon_vmls_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int32x2_t)__builtin_neon_vmls_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return (float32x2_t)__builtin_neon_vmls_nv2sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vmls_nv4hi ((int16x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vmls_nv2si ((int32x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
+{
+ return (int16x8_t)__builtin_neon_vmls_nv8hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmls_nv4si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return (float32x4_t)__builtin_neon_vmls_nv4sf (__a, __b, (__builtin_neon_sf) __c, 3);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vmls_nv8hi ((int16x8_t) __a, (int16x8_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmls_nv4si ((int32x4_t) __a, (int32x4_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vmlsl_nv4hi ((int32x4_t) __a, (int16x4_t) __b, (__builtin_neon_hi) __c, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vmlsl_nv2si ((int64x2_t) __a, (int32x2_t) __b, (__builtin_neon_si) __c, 0);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
+{
+ return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi (__a, __b, (__builtin_neon_hi) __c, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
+{
+ return (int64x2_t)__builtin_neon_vqdmlsl_nv2si (__a, __b, (__builtin_neon_si) __c, 1);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vext_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vext_s8 (int8x8_t __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vextv8qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vext_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vextv4hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vext_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vextv2si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vext_s64 (int64x1_t __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vextdi (__a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vext_f32 (float32x2_t __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vextv2sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vext_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vext_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vext_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vextv2si ((int32x2_t) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vext_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vextdi ((int64x1_t) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vext_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vextv8qi ((int8x8_t) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vext_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vextv4hi ((int16x4_t) __a, (int16x4_t) __b, __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vextq_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vextq_s8 (int8x16_t __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vextv16qi (__a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vextq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vextv8hi (__a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vextq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vextv4si (__a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vextq_s64 (int64x2_t __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vextv2di (__a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vextq_f32 (float32x4_t __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vextv4sf (__a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vextq_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vextq_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vextq_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vextv4si ((int32x4_t) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vextq_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vextv2di ((int64x2_t) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vextq_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vextv16qi ((int8x16_t) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vextq_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vextv8hi ((int16x8_t) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev64_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev64_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vrev64_s32 (int32x2_t __a)
+{
+ return (int32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vrev64_f32 (float32x2_t __a)
+{
+ return (float32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev64_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev64_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vrev64_u32 (uint32x2_t __a)
+{
+ return (uint32x2_t) __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev64_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev64_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev64q_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev64q_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vrev64q_s32 (int32x4_t __a)
+{
+ return (int32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vrev64q_f32 (float32x4_t __a)
+{
+ return (float32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev64q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev64q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vrev64q_u32 (uint32x4_t __a)
+{
+ return (uint32x4_t) __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev64q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev64q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev32_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vrev32_s16 (int16x4_t __a)
+{
+ return (int16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev32_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vrev32_u16 (uint16x4_t __a)
+{
+ return (uint16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev32_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vrev32_p16 (poly16x4_t __a)
+{
+ return (poly16x4_t) __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev32q_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vrev32q_s16 (int16x8_t __a)
+{
+ return (int16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev32q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vrev32q_u16 (uint16x8_t __a)
+{
+ return (uint16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev32q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vrev32q_p16 (poly16x8_t __a)
+{
+ return (poly16x8_t) __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vrev16_s8 (int8x8_t __a)
+{
+ return (int8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vrev16_u8 (uint8x8_t __a)
+{
+ return (uint8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vrev16_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vrev16q_s8 (int8x16_t __a)
+{
+ return (int8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vrev16q_u8 (uint8x16_t __a)
+{
+ return (uint8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vrev16q_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) __builtin_shuffle (__a, (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
+{
+ return (poly64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
+{
+ return (int8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
+{
+ return (int16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
+{
+ return (int32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
+{
+ return (int64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
+{
+ return (float32x2_t)__builtin_neon_vbslv2sf ((int32x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
+{
+ return (uint8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
+{
+ return (uint16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
+{
+ return (uint32x2_t)__builtin_neon_vbslv2si ((int32x2_t) __a, (int32x2_t) __b, (int32x2_t) __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
+{
+ return (uint64x1_t)__builtin_neon_vbsldi ((int64x1_t) __a, (int64x1_t) __b, (int64x1_t) __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
+{
+ return (poly8x8_t)__builtin_neon_vbslv8qi ((int8x8_t) __a, (int8x8_t) __b, (int8x8_t) __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
+{
+ return (poly16x4_t)__builtin_neon_vbslv4hi ((int16x4_t) __a, (int16x4_t) __b, (int16x4_t) __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
+{
+ return (poly64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
+{
+ return (int8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
+{
+ return (int16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
+{
+ return (int32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
+{
+ return (int64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
+{
+ return (float32x4_t)__builtin_neon_vbslv4sf ((int32x4_t) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
+{
+ return (uint8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
+{
+ return (uint16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
+{
+ return (uint32x4_t)__builtin_neon_vbslv4si ((int32x4_t) __a, (int32x4_t) __b, (int32x4_t) __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
+{
+ return (uint64x2_t)__builtin_neon_vbslv2di ((int64x2_t) __a, (int64x2_t) __b, (int64x2_t) __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
+{
+ return (poly8x16_t)__builtin_neon_vbslv16qi ((int8x16_t) __a, (int8x16_t) __b, (int8x16_t) __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
+{
+ return (poly16x8_t)__builtin_neon_vbslv8hi ((int16x8_t) __a, (int16x8_t) __b, (int16x8_t) __c);
+}
+
+/* For big-endian, the shuffle masks for ZIP, UZP and TRN must be changed as
+ follows.  (nelt = the number of elements within a vector.)
+
+ Firstly, a value N within a mask becomes (N ^ (nelt - 1)), because the gcc
+ vector extension's indexing scheme is reversed *within each vector* (relative
+ to the neon intrinsics view), without changing which of the two input vectors
+ the element is taken from.
+
+ Secondly, the elements within each mask are reversed, as the mask is itself a
+ vector and will itself be loaded in reverse order (again, relative to the
+ neon intrinsics view, i.e. the order that would result from a "vld1"
+ instruction).  A worked example follows. */
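+
+/* As an illustration of the rule above (derived from the vtrn_s8 masks below,
+ with nelt = 8): the little-endian mask for val[0] is
+ { 0, 8, 2, 10, 4, 12, 6, 14 }.  XORing each index with (nelt - 1) = 7 gives
+ { 7, 15, 5, 13, 3, 11, 1, 9 }, and reversing the order of the mask elements
+ then yields { 9, 1, 11, 3, 13, 5, 15, 7 }, which is exactly the big-endian
+ mask used for val[0] in vtrn_s8. */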
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vtrn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vtrn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vtrn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vtrn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vtrn_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vtrn_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vtrn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vtrn_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vtrn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vtrnq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 1, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 6, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 2, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 5, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vtrnq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vtrnq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 1, 11, 3, 13, 5, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 10, 2, 12, 4, 14, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 2, 10, 4, 12, 6, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 9, 3, 11, 5, 13, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vzip_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vzip_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vzip_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vzip_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vzip_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vzip_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vzip_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vzip_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vzip_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vzipq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vzipq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vzipq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vzipq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vzipq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vzipq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vzipq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 6, 2, 7, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 0, 5, 1 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 4, 1, 5 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 2, 6, 3, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vzipq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vzipq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 12, 4, 13, 5, 14, 6, 15, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 0, 9, 1, 10, 2, 11, 3 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 8, 1, 9, 2, 10, 3, 11 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 4, 12, 5, 13, 6, 14, 7, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vuzp_s8 (int8x8_t __a, int8x8_t __b)
+{
+ int8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vuzp_s16 (int16x4_t __a, int16x4_t __b)
+{
+ int16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vuzp_s32 (int32x2_t __a, int32x2_t __b)
+{
+ int32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vuzp_f32 (float32x2_t __a, float32x2_t __b)
+{
+ float32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vuzp_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ uint8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vuzp_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ uint16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vuzp_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ uint32x2x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 3, 1 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 2, 0 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x2_t) { 0, 2 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x2_t) { 1, 3 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vuzp_p8 (poly8x8_t __a, poly8x8_t __b)
+{
+ poly8x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vuzp_p16 (poly16x4_t __a, poly16x4_t __b)
+{
+ poly16x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ int8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ int16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ int32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ float32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ uint8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ uint16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vuzpq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ uint32x4x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 5, 7, 1, 3 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 4, 6, 0, 2 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint32x4_t) { 0, 2, 4, 6 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint32x4_t) { 1, 3, 5, 7 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vuzpq_p8 (poly8x16_t __a, poly8x16_t __b)
+{
+ poly8x16x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint8x16_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 });
+#endif
+ return __rv;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vuzpq_p16 (poly16x8_t __a, poly16x8_t __b)
+{
+ poly16x8x2_t __rv;
+#ifdef __ARM_BIG_ENDIAN
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 9, 11, 13, 15, 1, 3, 5, 7 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 8, 10, 12, 14, 0, 2, 4, 6 });
+#else
+ __rv.val[0] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 0, 2, 4, 6, 8, 10, 12, 14 });
+ __rv.val[1] = __builtin_shuffle (__a, __b, (uint16x8_t)
+ { 1, 3, 5, 7, 9, 11, 13, 15 });
+#endif
+ return __rv;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_s64 (const int64_t * __a)
+{
+ return (int64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1v2sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1v2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t)__builtin_neon_vld1di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1v8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1v4hi ((const __builtin_neon_hi *) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1v4sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1v4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1v2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1v16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1v8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_lane_s8 (const int8_t * __a, int8x8_t __b, const int __c)
+{
+ return (int8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_lane_s16 (const int16_t * __a, int16x4_t __b, const int __c)
+{
+ return (int16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_lane_s32 (const int32_t * __a, int32x2_t __b, const int __c)
+{
+ return (int32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_lane_f32 (const float32_t * __a, float32x2_t __b, const int __c)
+{
+ return (float32x2_t)__builtin_neon_vld1_lanev2sf ((const __builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_lane_u8 (const uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ return (uint8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_lane_u16 (const uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ return (uint16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_lane_u32 (const uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ return (uint32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_lane_p8 (const poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ return (poly8x8_t)__builtin_neon_vld1_lanev8qi ((const __builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_lane_p16 (const poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ return (poly16x4_t)__builtin_neon_vld1_lanev4hi ((const __builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_lane_p64 (const poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_lane_s64 (const int64_t * __a, int64x1_t __b, const int __c)
+{
+ return (int64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_lane_u64 (const uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ return (uint64x1_t)__builtin_neon_vld1_lanedi ((const __builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_s8 (const int8_t * __a, int8x16_t __b, const int __c)
+{
+ return (int8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_s16 (const int16_t * __a, int16x8_t __b, const int __c)
+{
+ return (int16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_s32 (const int32_t * __a, int32x4_t __b, const int __c)
+{
+ return (int32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_f32 (const float32_t * __a, float32x4_t __b, const int __c)
+{
+ return (float32x4_t)__builtin_neon_vld1_lanev4sf ((const __builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_u8 (const uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ return (uint8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_u16 (const uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ return (uint16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_lane_u32 (const uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ return (uint32x4_t)__builtin_neon_vld1_lanev4si ((const __builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_lane_p8 (const poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ return (poly8x16_t)__builtin_neon_vld1_lanev16qi ((const __builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_lane_p16 (const poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ return (poly16x8_t)__builtin_neon_vld1_lanev8hi ((const __builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_p64 (const poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ return (poly64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_s64 (const int64_t * __a, int64x2_t __b, const int __c)
+{
+ return (int64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_lane_u64 (const uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ return (uint64x2_t)__builtin_neon_vld1_lanev2di ((const __builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vld1_dup_s8 (const int8_t * __a)
+{
+ return (int8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vld1_dup_s16 (const int16_t * __a)
+{
+ return (int16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vld1_dup_s32 (const int32_t * __a)
+{
+ return (int32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vld1_dup_f32 (const float32_t * __a)
+{
+ return (float32x2_t)__builtin_neon_vld1_dupv2sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vld1_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vld1_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vld1_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vld1_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x8_t)__builtin_neon_vld1_dupv8qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vld1_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x4_t)__builtin_neon_vld1_dupv4hi ((const __builtin_neon_hi *) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vld1_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vld1_dup_s64 (const int64_t * __a)
+{
+ return (int64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vld1_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x1_t)__builtin_neon_vld1_dupdi ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_s8 (const int8_t * __a)
+{
+ return (int8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_s16 (const int16_t * __a)
+{
+ return (int16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_s32 (const int32_t * __a)
+{
+ return (int32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_f32 (const float32_t * __a)
+{
+ return (float32x4_t)__builtin_neon_vld1_dupv4sf ((const __builtin_neon_sf *) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_u8 (const uint8_t * __a)
+{
+ return (uint8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_u16 (const uint16_t * __a)
+{
+ return (uint16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vld1q_dup_u32 (const uint32_t * __a)
+{
+ return (uint32x4_t)__builtin_neon_vld1_dupv4si ((const __builtin_neon_si *) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vld1q_dup_p8 (const poly8_t * __a)
+{
+ return (poly8x16_t)__builtin_neon_vld1_dupv16qi ((const __builtin_neon_qi *) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vld1q_dup_p16 (const poly16_t * __a)
+{
+ return (poly16x8_t)__builtin_neon_vld1_dupv8hi ((const __builtin_neon_hi *) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_p64 (const poly64_t * __a)
+{
+ return (poly64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_s64 (const int64_t * __a)
+{
+ return (int64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vld1q_dup_u64 (const uint64_t * __a)
+{
+ return (uint64x2_t)__builtin_neon_vld1_dupv2di ((const __builtin_neon_di *) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p64 (poly64_t * __a, poly64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+#endif
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s8 (int8_t * __a, int8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s16 (int16_t * __a, int16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s32 (int32_t * __a, int32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_s64 (int64_t * __a, int64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_f32 (float32_t * __a, float32x2_t __b)
+{
+ __builtin_neon_vst1v2sf ((__builtin_neon_sf *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u8 (uint8_t * __a, uint8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u16 (uint16_t * __a, uint16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u32 (uint32_t * __a, uint32x2_t __b)
+{
+ __builtin_neon_vst1v2si ((__builtin_neon_si *) __a, (int32x2_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_u64 (uint64_t * __a, uint64x1_t __b)
+{
+ __builtin_neon_vst1di ((__builtin_neon_di *) __a, (int64x1_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p8 (poly8_t * __a, poly8x8_t __b)
+{
+ __builtin_neon_vst1v8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_p16 (poly16_t * __a, poly16x4_t __b)
+{
+ __builtin_neon_vst1v4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p64 (poly64_t * __a, poly64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+#endif
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s8 (int8_t * __a, int8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s16 (int16_t * __a, int16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s32 (int32_t * __a, int32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_s64 (int64_t * __a, int64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_f32 (float32_t * __a, float32x4_t __b)
+{
+ __builtin_neon_vst1v4sf ((__builtin_neon_sf *) __a, __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u8 (uint8_t * __a, uint8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u16 (uint16_t * __a, uint16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u32 (uint32_t * __a, uint32x4_t __b)
+{
+ __builtin_neon_vst1v4si ((__builtin_neon_si *) __a, (int32x4_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_u64 (uint64_t * __a, uint64x2_t __b)
+{
+ __builtin_neon_vst1v2di ((__builtin_neon_di *) __a, (int64x2_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p8 (poly8_t * __a, poly8x16_t __b)
+{
+ __builtin_neon_vst1v16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_p16 (poly16_t * __a, poly16x8_t __b)
+{
+ __builtin_neon_vst1v8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s8 (int8_t * __a, int8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s16 (int16_t * __a, int16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s32 (int32_t * __a, int32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_f32 (float32_t * __a, float32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2sf ((__builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u8 (uint8_t * __a, uint8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u16 (uint16_t * __a, uint16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u32 (uint32_t * __a, uint32x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2si ((__builtin_neon_si *) __a, (int32x2_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p8 (poly8_t * __a, poly8x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8qi ((__builtin_neon_qi *) __a, (int8x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p16 (poly16_t * __a, poly16x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4hi ((__builtin_neon_hi *) __a, (int16x4_t) __b, __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_p64 (poly64_t * __a, poly64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+#endif
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_s64 (int64_t * __a, int64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1_lane_u64 (uint64_t * __a, uint64x1_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanedi ((__builtin_neon_di *) __a, (int64x1_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s8 (int8_t * __a, int8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s16 (int16_t * __a, int16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s32 (int32_t * __a, int32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_f32 (float32_t * __a, float32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4sf ((__builtin_neon_sf *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u8 (uint8_t * __a, uint8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u16 (uint16_t * __a, uint16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u32 (uint32_t * __a, uint32x4_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev4si ((__builtin_neon_si *) __a, (int32x4_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p8 (poly8_t * __a, poly8x16_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev16qi ((__builtin_neon_qi *) __a, (int8x16_t) __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p16 (poly16_t * __a, poly16x8_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev8hi ((__builtin_neon_hi *) __a, (int16x8_t) __b, __c);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_p64 (poly64_t * __a, poly64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
+#endif
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_s64 (int64_t * __a, int64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, __b, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst1q_lane_u64 (uint64_t * __a, uint64x2_t __b, const int __c)
+{
+ __builtin_neon_vst1_lanev2di ((__builtin_neon_di *) __a, (int64x2_t) __b, __c);
+}
+
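+/* Editorial usage sketch, not part of the generated header: the vst1_lane and
+   vst1q_lane intrinsics above store a single lane, selected by a constant
+   index, instead of the whole register.  The helper name is hypothetical.  */
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__neon_example_store_lane3 (float32_t * __p, float32x4_t __v)
+{
+ /* Write only lane 3 of __v to *__p; memory outside *__p is untouched.  */
+ vst1q_lane_f32 (__p, __v, 3);
+}
+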
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x2_t __attribute__ ((__always_inline__))
+vld2_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+vld2q_s8 (const int8_t * __a)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_s16 (const int16_t * __a)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_s32 (const int32_t * __a)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_f32 (const float32_t * __a)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+vld2q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+vld2q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
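+/* Editorial usage sketch, not part of the generated header: vld2/vld2q
+   perform a de-interleaving load, so 16 alternating bytes become two
+   uint8x8_t vectors, returned through the union trick used above.  The
+   helper name is hypothetical.  */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__neon_example_even_bytes (const uint8_t * __p)
+{
+ /* __pair.val[0] holds bytes 0,2,4,...; __pair.val[1] holds bytes 1,3,5,...  */
+ uint8x8x2_t __pair = vld2_u8 (__p);
+ return __pair.val[0];
+}
+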
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_s8 (const int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_s16 (const int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_s32 (const int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_f32 (const float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_u8 (const uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_u16 (const uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_lane_u32 (const uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_lane_p8 (const poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_lane_p16 (const poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_s16 (const int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_s32 (const int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_f32 (const float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_u16 (const uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+vld2q_lane_u32 (const uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+vld2q_lane_p16 (const poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
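+/* Editorial usage sketch, not part of the generated header: vld2_lane loads
+   one interleaved element pair from memory into a chosen lane of an existing
+   pair of vectors, leaving the other lanes as passed in.  The helper name is
+   hypothetical.  */
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+__neon_example_insert_pair (const int16_t * __p, int16x4x2_t __acc)
+{
+ /* Replace lane 0 of both vectors in __acc with __p[0] and __p[1].  */
+ return vld2_lane_s16 (__p, __acc, 0);
+}
+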
+__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+vld2_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+vld2_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+vld2_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
+__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+vld2_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __rv;
+ __rv.__o = __builtin_neon_vld2_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
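+/* Editorial usage sketch, not part of the generated header: vld2_dup reads
+   one interleaved pair from memory and broadcasts each element across all
+   lanes of its vector.  The helper name is hypothetical.  */
+__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+__neon_example_splat_pair (const float32_t * __p)
+{
+ /* val[0] = { __p[0], __p[0] }, val[1] = { __p[1], __p[1] }.  */
+ return vld2_dup_f32 (__p);
+}
+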
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s8 (int8_t * __a, int8x8x2_t __b)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s16 (int16_t * __a, int16x4x2_t __b)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s32 (int32_t * __a, int32x2x2_t __b)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_f32 (float32_t * __a, float32x2x2_t __b)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u8 (uint8_t * __a, uint8x8x2_t __b)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u16 (uint16_t * __a, uint16x4x2_t __b)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u32 (uint32_t * __a, uint32x2x2_t __b)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p8 (poly8_t * __a, poly8x8x2_t __b)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p16 (poly16_t * __a, poly16x4x2_t __b)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_p64 (poly64_t * __a, poly64x1x2_t __b)
+{
+ union { poly64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_s64 (int64_t * __a, int64x1x2_t __b)
+{
+ union { int64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_u64 (uint64_t * __a, uint64x1x2_t __b)
+{
+ union { uint64x1x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s8 (int8_t * __a, int8x16x2_t __b)
+{
+ union { int8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s16 (int16_t * __a, int16x8x2_t __b)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_s32 (int32_t * __a, int32x4x2_t __b)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_f32 (float32_t * __a, float32x4x2_t __b)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u8 (uint8_t * __a, uint8x16x2_t __b)
+{
+ union { uint8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u16 (uint16_t * __a, uint16x8x2_t __b)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_u32 (uint32_t * __a, uint32x4x2_t __b)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p8 (poly8_t * __a, poly8x16x2_t __b)
+{
+ union { poly8x16x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_p16 (poly16_t * __a, poly16x8x2_t __b)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s8 (int8_t * __a, int8x8x2_t __b, const int __c)
+{
+ union { int8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s16 (int16_t * __a, int16x4x2_t __b, const int __c)
+{
+ union { int16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_s32 (int32_t * __a, int32x2x2_t __b, const int __c)
+{
+ union { int32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_f32 (float32_t * __a, float32x2x2_t __b, const int __c)
+{
+ union { float32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u8 (uint8_t * __a, uint8x8x2_t __b, const int __c)
+{
+ union { uint8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u16 (uint16_t * __a, uint16x4x2_t __b, const int __c)
+{
+ union { uint16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_u32 (uint32_t * __a, uint32x2x2_t __b, const int __c)
+{
+ union { uint32x2x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_p8 (poly8_t * __a, poly8x8x2_t __b, const int __c)
+{
+ union { poly8x8x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2_lane_p16 (poly16_t * __a, poly16x4x2_t __b, const int __c)
+{
+ union { poly16x4x2_t __i; __builtin_neon_ti __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_s16 (int16_t * __a, int16x8x2_t __b, const int __c)
+{
+ union { int16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_s32 (int32_t * __a, int32x4x2_t __b, const int __c)
+{
+ union { int32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_f32 (float32_t * __a, float32x4x2_t __b, const int __c)
+{
+ union { float32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_u16 (uint16_t * __a, uint16x8x2_t __b, const int __c)
+{
+ union { uint16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_u32 (uint32_t * __a, uint32x4x2_t __b, const int __c)
+{
+ union { uint32x4x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst2q_lane_p16 (poly16_t * __a, poly16x8x2_t __b, const int __c)
+{
+ union { poly16x8x2_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst2_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
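+/* Editorial usage sketch, not part of the generated header: vst2/vst2q are
+   the interleaving counterparts of vld2, writing the two vectors of the x2
+   struct to memory element-interleaved.  Hypothetical helper, assuming the
+   caller provides a buffer of at least 16 bytes.  */
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__neon_example_interleave (uint8_t * __p, uint8x8_t __even, uint8x8_t __odd)
+{
+ uint8x8x2_t __pair = { { __even, __odd } };
+ /* Stores __even[0], __odd[0], __even[1], __odd[1], ... (16 bytes).  */
+ vst2_u8 (__p, __pair);
+}
+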
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x3_t __attribute__ ((__always_inline__))
+vld3_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+vld3q_s8 (const int8_t * __a)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_s16 (const int16_t * __a)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_s32 (const int32_t * __a)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_f32 (const float32_t * __a)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+vld3q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+vld3q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_s8 (const int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_s16 (const int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_s32 (const int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_f32 (const float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_u8 (const uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_u16 (const uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_lane_u32 (const uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_lane_p8 (const poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_lane_p16 (const poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_s16 (const int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_s32 (const int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_f32 (const float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_u16 (const uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+vld3q_lane_u32 (const uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+vld3q_lane_p16 (const poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+vld3_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+vld3_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+vld3_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
+__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+vld3_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __rv;
+ __rv.__o = __builtin_neon_vld3_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s8 (int8_t * __a, int8x8x3_t __b)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s16 (int16_t * __a, int16x4x3_t __b)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s32 (int32_t * __a, int32x2x3_t __b)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_f32 (float32_t * __a, float32x2x3_t __b)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u8 (uint8_t * __a, uint8x8x3_t __b)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u16 (uint16_t * __a, uint16x4x3_t __b)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u32 (uint32_t * __a, uint32x2x3_t __b)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p8 (poly8_t * __a, poly8x8x3_t __b)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p16 (poly16_t * __a, poly16x4x3_t __b)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_p64 (poly64_t * __a, poly64x1x3_t __b)
+{
+ union { poly64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_s64 (int64_t * __a, int64x1x3_t __b)
+{
+ union { int64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_u64 (uint64_t * __a, uint64x1x3_t __b)
+{
+ union { uint64x1x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s8 (int8_t * __a, int8x16x3_t __b)
+{
+ union { int8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s16 (int16_t * __a, int16x8x3_t __b)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_s32 (int32_t * __a, int32x4x3_t __b)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_f32 (float32_t * __a, float32x4x3_t __b)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u8 (uint8_t * __a, uint8x16x3_t __b)
+{
+ union { uint8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u16 (uint16_t * __a, uint16x8x3_t __b)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_u32 (uint32_t * __a, uint32x4x3_t __b)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p8 (poly8_t * __a, poly8x16x3_t __b)
+{
+ union { poly8x16x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_p16 (poly16_t * __a, poly16x8x3_t __b)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s8 (int8_t * __a, int8x8x3_t __b, const int __c)
+{
+ union { int8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s16 (int16_t * __a, int16x4x3_t __b, const int __c)
+{
+ union { int16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_s32 (int32_t * __a, int32x2x3_t __b, const int __c)
+{
+ union { int32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_f32 (float32_t * __a, float32x2x3_t __b, const int __c)
+{
+ union { float32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u8 (uint8_t * __a, uint8x8x3_t __b, const int __c)
+{
+ union { uint8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u16 (uint16_t * __a, uint16x4x3_t __b, const int __c)
+{
+ union { uint16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_u32 (uint32_t * __a, uint32x2x3_t __b, const int __c)
+{
+ union { uint32x2x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_p8 (poly8_t * __a, poly8x8x3_t __b, const int __c)
+{
+ union { poly8x8x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3_lane_p16 (poly16_t * __a, poly16x4x3_t __b, const int __c)
+{
+ union { poly16x4x3_t __i; __builtin_neon_ei __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_s16 (int16_t * __a, int16x8x3_t __b, const int __c)
+{
+ union { int16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_s32 (int32_t * __a, int32x4x3_t __b, const int __c)
+{
+ union { int32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_f32 (float32_t * __a, float32x4x3_t __b, const int __c)
+{
+ union { float32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_u16 (uint16_t * __a, uint16x8x3_t __b, const int __c)
+{
+ union { uint16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_u32 (uint32_t * __a, uint32x4x3_t __b, const int __c)
+{
+ union { uint32x4x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst3q_lane_p16 (poly16_t * __a, poly16x8x3_t __b, const int __c)
+{
+ union { poly16x8x3_t __i; __builtin_neon_ci __o; } __bu = { __b };
+ __builtin_neon_vst3_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
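+/* Editorial usage sketch, not part of the generated header: vld3 and vst3
+   de-interleave and re-interleave groups of three, the classic pattern for
+   packed RGB pixel data.  The helper name is hypothetical; it swaps the R
+   and B channels of eight packed RGB pixels in place.  */
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__neon_example_swap_rb (uint8_t * __rgb)
+{
+ uint8x8x3_t __px = vld3_u8 (__rgb);   /* val[0]=R, val[1]=G, val[2]=B */
+ uint8x8_t __tmp = __px.val[0];
+ __px.val[0] = __px.val[2];
+ __px.val[2] = __tmp;
+ vst3_u8 (__rgb, __px);                /* re-interleave back to memory */
+}
+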
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x4_t __attribute__ ((__always_inline__))
+vld4_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4di ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+vld4q_s8 (const int8_t * __a)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_s16 (const int16_t * __a)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_s32 (const int32_t * __a)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_f32 (const float32_t * __a)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+vld4q_u8 (const uint8_t * __a)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_u16 (const uint16_t * __a)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_u32 (const uint32_t * __a)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v4si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+vld4q_p8 (const poly8_t * __a)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v16qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_p16 (const poly16_t * __a)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4v8hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_s8 (const int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_s16 (const int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_s32 (const int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_f32 (const float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_u8 (const uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_u16 (const uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_lane_u32 (const uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev2si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_lane_p8 (const poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8qi ((const __builtin_neon_qi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_lane_p16 (const poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_s16 (const int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_s32 (const int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_f32 (const float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4sf ((const __builtin_neon_sf *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_u16 (const uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+vld4q_lane_u32 (const uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev4si ((const __builtin_neon_si *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+vld4q_lane_p16 (const poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_lanev8hi ((const __builtin_neon_hi *) __a, __bu.__o, __c);
+ return __rv.__i;
+}
+
+__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_s8 (const int8_t * __a)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_s16 (const int16_t * __a)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_s32 (const int32_t * __a)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_f32 (const float32_t * __a)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2sf ((const __builtin_neon_sf *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_u8 (const uint8_t * __a)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_u16 (const uint16_t * __a)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+vld4_dup_u32 (const uint32_t * __a)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv2si ((const __builtin_neon_si *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+vld4_dup_p8 (const poly8_t * __a)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv8qi ((const __builtin_neon_qi *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+vld4_dup_p16 (const poly16_t * __a)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupv4hi ((const __builtin_neon_hi *) __a);
+ return __rv.__i;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_p64 (const poly64_t * __a)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+#endif
+__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_s64 (const int64_t * __a)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+vld4_dup_u64 (const uint64_t * __a)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __rv;
+ __rv.__o = __builtin_neon_vld4_dupdi ((const __builtin_neon_di *) __a);
+ return __rv.__i;
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s8 (int8_t * __a, int8x8x4_t __b)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s16 (int16_t * __a, int16x4x4_t __b)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s32 (int32_t * __a, int32x2x4_t __b)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_f32 (float32_t * __a, float32x2x4_t __b)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u8 (uint8_t * __a, uint8x8x4_t __b)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u16 (uint16_t * __a, uint16x4x4_t __b)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u32 (uint32_t * __a, uint32x2x4_t __b)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v2si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p8 (poly8_t * __a, poly8x8x4_t __b)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v8qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p16 (poly16_t * __a, poly16x4x4_t __b)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4v4hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_p64 (poly64_t * __a, poly64x1x4_t __b)
+{
+ union { poly64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+#endif
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_s64 (int64_t * __a, int64x1x4_t __b)
+{
+ union { int64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_u64 (uint64_t * __a, uint64x1x4_t __b)
+{
+ union { uint64x1x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4di ((__builtin_neon_di *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s8 (int8_t * __a, int8x16x4_t __b)
+{
+ union { int8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s16 (int16_t * __a, int16x8x4_t __b)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_s32 (int32_t * __a, int32x4x4_t __b)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_f32 (float32_t * __a, float32x4x4_t __b)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4sf ((__builtin_neon_sf *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u8 (uint8_t * __a, uint8x16x4_t __b)
+{
+ union { uint8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u16 (uint16_t * __a, uint16x8x4_t __b)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_u32 (uint32_t * __a, uint32x4x4_t __b)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v4si ((__builtin_neon_si *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p8 (poly8_t * __a, poly8x16x4_t __b)
+{
+ union { poly8x16x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v16qi ((__builtin_neon_qi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_p16 (poly16_t * __a, poly16x8x4_t __b)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4v8hi ((__builtin_neon_hi *) __a, __bu.__o);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s8 (int8_t * __a, int8x8x4_t __b, const int __c)
+{
+ union { int8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s16 (int16_t * __a, int16x4x4_t __b, const int __c)
+{
+ union { int16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_s32 (int32_t * __a, int32x2x4_t __b, const int __c)
+{
+ union { int32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_f32 (float32_t * __a, float32x2x4_t __b, const int __c)
+{
+ union { float32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u8 (uint8_t * __a, uint8x8x4_t __b, const int __c)
+{
+ union { uint8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u16 (uint16_t * __a, uint16x4x4_t __b, const int __c)
+{
+ union { uint16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_u32 (uint32_t * __a, uint32x2x4_t __b, const int __c)
+{
+ union { uint32x2x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev2si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_p8 (poly8_t * __a, poly8x8x4_t __b, const int __c)
+{
+ union { poly8x8x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8qi ((__builtin_neon_qi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4_lane_p16 (poly16_t * __a, poly16x4x4_t __b, const int __c)
+{
+ union { poly16x4x4_t __i; __builtin_neon_oi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_s16 (int16_t * __a, int16x8x4_t __b, const int __c)
+{
+ union { int16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_s32 (int32_t * __a, int32x4x4_t __b, const int __c)
+{
+ union { int32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_f32 (float32_t * __a, float32x4x4_t __b, const int __c)
+{
+ union { float32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4sf ((__builtin_neon_sf *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_u16 (uint16_t * __a, uint16x8x4_t __b, const int __c)
+{
+ union { uint16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_u32 (uint32_t * __a, uint32x4x4_t __b, const int __c)
+{
+ union { uint32x4x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev4si ((__builtin_neon_si *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vst4q_lane_p16 (poly16_t * __a, poly16x8x4_t __b, const int __c)
+{
+ union { poly16x8x4_t __i; __builtin_neon_xi __o; } __bu = { __b };
+ __builtin_neon_vst4_lanev8hi ((__builtin_neon_hi *) __a, __bu.__o, __c);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vand_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vandv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vand_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vandv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vand_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vandv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vand_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vandv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vand_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vandv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vand_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vandv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vand_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vanddi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vand_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vanddi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vandq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vandv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vandq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vandv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vandq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vandv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vandq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vandv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vandq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vandv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vandq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vandv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vandq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vandv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vandq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vandv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorr_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vorrv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorr_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vorrv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorr_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vorrv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorr_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vorrv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorr_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vorrv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorr_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vorrv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorr_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vorrdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorr_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vorrdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vorrq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vorrv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vorrq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vorrv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vorrq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vorrv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vorrq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vorrv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vorrv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vorrv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vorrv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vorrv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+veor_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_veorv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+veor_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_veorv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+veor_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_veorv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+veor_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_veorv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+veor_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_veorv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+veor_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_veorv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+veor_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_veordi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+veor_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_veordi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+veorq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_veorv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+veorq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_veorv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+veorq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_veorv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+veorq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_veorv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+veorq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_veorv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+veorq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_veorv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+veorq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_veorv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+veorq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_veorv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vbic_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vbicv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vbic_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vbicv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vbic_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vbicv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vbic_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vbicv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vbic_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vbicv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vbic_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vbicv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vbic_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vbicdi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vbic_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vbicdi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vbicq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vbicv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vbicq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vbicv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vbicq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vbicv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vbicq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vbicv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vbicv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vbicv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vbicv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vbicv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vorn_s8 (int8x8_t __a, int8x8_t __b)
+{
+ return (int8x8_t)__builtin_neon_vornv8qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vorn_s16 (int16x4_t __a, int16x4_t __b)
+{
+ return (int16x4_t)__builtin_neon_vornv4hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vorn_s32 (int32x2_t __a, int32x2_t __b)
+{
+ return (int32x2_t)__builtin_neon_vornv2si (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vorn_u8 (uint8x8_t __a, uint8x8_t __b)
+{
+ return (uint8x8_t)__builtin_neon_vornv8qi ((int8x8_t) __a, (int8x8_t) __b, 0);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vorn_u16 (uint16x4_t __a, uint16x4_t __b)
+{
+ return (uint16x4_t)__builtin_neon_vornv4hi ((int16x4_t) __a, (int16x4_t) __b, 0);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vorn_u32 (uint32x2_t __a, uint32x2_t __b)
+{
+ return (uint32x2_t)__builtin_neon_vornv2si ((int32x2_t) __a, (int32x2_t) __b, 0);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vorn_s64 (int64x1_t __a, int64x1_t __b)
+{
+ return (int64x1_t)__builtin_neon_vorndi (__a, __b, 1);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vorn_u64 (uint64x1_t __a, uint64x1_t __b)
+{
+ return (uint64x1_t)__builtin_neon_vorndi ((int64x1_t) __a, (int64x1_t) __b, 0);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vornq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return (int8x16_t)__builtin_neon_vornv16qi (__a, __b, 1);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vornq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return (int16x8_t)__builtin_neon_vornv8hi (__a, __b, 1);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vornq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return (int32x4_t)__builtin_neon_vornv4si (__a, __b, 1);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vornq_s64 (int64x2_t __a, int64x2_t __b)
+{
+ return (int64x2_t)__builtin_neon_vornv2di (__a, __b, 1);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vornq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return (uint8x16_t)__builtin_neon_vornv16qi ((int8x16_t) __a, (int8x16_t) __b, 0);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vornq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return (uint16x8_t)__builtin_neon_vornv8hi ((int16x8_t) __a, (int16x8_t) __b, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vornq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return (uint32x4_t)__builtin_neon_vornv4si ((int32x4_t) __a, (int32x4_t) __b, 0);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vornq_u64 (uint64x2_t __a, uint64x2_t __b)
+{
+ return (uint64x2_t)__builtin_neon_vornv2di ((int64x2_t) __a, (int64x2_t) __b, 0);
+}
+
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_p16 (poly16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_f32 (float32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_p64 (poly64x1_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+#endif
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s64 (int64x1_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u64 (uint64x1_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s8 (int8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s16 (int16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_s32 (int32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u8 (uint8x8_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u16 (uint16x4_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_u32 (uint32x2_t __a)
+{
+ return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p8 (poly8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f32 (float32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_p64 (poly64x1_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+#endif
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s64 (int64x1_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u64 (uint64x1_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s8 (int8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s16 (int16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_s32 (int32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u8 (uint8x8_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u16 (uint16x4_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_u32 (uint32x2_t __a)
+{
+ return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p8 (poly8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p16 (poly16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_p64 (poly64x1_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
+}
+
+#endif
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s64 (int64x1_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u64 (uint64x1_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfdi ((int64x1_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s8 (int8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s16 (int16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_s32 (int32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si (__a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u8 (uint8x8_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u16 (uint16x4_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_u32 (uint32x2_t __a)
+{
+ return (float32x2_t)__builtin_neon_vreinterpretv2sfv2si ((int32x2_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_p8 (poly8x8_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_p16 (poly16x4_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_f32 (float32x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s64 (int64x1_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u64 (uint64x1_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s8 (int8x8_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s16 (int16x4_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_s32 (int32x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u8 (uint8x8_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u16 (uint16x4_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_u32 (uint32x2_t __a)
+{
+ return (poly64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p8 (poly8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p16 (poly16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f32 (float32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_p64 (poly64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+#endif
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u64 (uint64x1_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s8 (int8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s16 (int16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_s32 (int32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u8 (uint8x8_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u16 (uint16x4_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_u32 (uint32x2_t __a)
+{
+ return (int64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p8 (poly8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p16 (poly16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f32 (float32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_p64 (poly64x1_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+#endif
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s64 (int64x1_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdidi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s8 (int8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s16 (int16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_s32 (int32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u8 (uint8x8_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u16 (uint16x4_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_u32 (uint32x2_t __a)
+{
+ return (uint64x1_t)__builtin_neon_vreinterpretdiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p8 (poly8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p16 (poly16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f32 (float32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_p64 (poly64x1_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+#endif
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s64 (int64x1_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u64 (uint64x1_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s16 (int16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_s32 (int32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u8 (uint8x8_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u16 (uint16x4_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_u32 (uint32x2_t __a)
+{
+ return (int8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p8 (poly8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p16 (poly16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f32 (float32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_p64 (poly64x1_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+#endif
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s64 (int64x1_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u64 (uint64x1_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s8 (int8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_s32 (int32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u8 (uint8x8_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u16 (uint16x4_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_u32 (uint32x2_t __a)
+{
+ return (int16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p8 (poly8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p16 (poly16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f32 (float32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_p64 (poly64x1_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+#endif
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s64 (int64x1_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u64 (uint64x1_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s8 (int8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_s16 (int16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u8 (uint8x8_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u16 (uint16x4_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_u32 (uint32x2_t __a)
+{
+ return (int32x2_t)__builtin_neon_vreinterpretv2siv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p8 (poly8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p16 (poly16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f32 (float32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_p64 (poly64x1_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+#endif
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s64 (int64x1_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u64 (uint64x1_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s8 (int8x8_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s16 (int16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_s32 (int32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u16 (uint16x4_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_u32 (uint32x2_t __a)
+{
+ return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p8 (poly8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p16 (poly16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f32 (float32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_p64 (poly64x1_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+#endif
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s64 (int64x1_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u64 (uint64x1_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s8 (int8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s16 (int16x4_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv4hi (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_s32 (int32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u8 (uint8x8_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_u32 (uint32x2_t __a)
+{
+ return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p8 (poly8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p16 (poly16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f32 (float32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_p64 (poly64x1_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+#endif
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s64 (int64x1_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u64 (uint64x1_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2sidi ((int64x1_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s8 (int8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s16 (int16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_s32 (int32x2_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u8 (uint8x8_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv8qi ((int8x8_t) __a);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_u16 (uint16x4_t __a)
+{
+ return (uint32x2_t)__builtin_neon_vreinterpretv2siv4hi ((int16x4_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p16 (poly16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_f32 (float32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p64 (poly64x2_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_p128 (poly128_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s64 (int64x2_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u64 (uint64x2_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s8 (int8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s16 (int16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_s32 (int32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u8 (uint8x16_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u16 (uint16x8_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_p8_u32 (uint32x4_t __a)
+{
+ return (poly8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p8 (poly8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_f32 (float32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p64 (poly64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_p128 (poly128_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s64 (int64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u64 (uint64x2_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s8 (int8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s16 (int16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_s32 (int32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u8 (uint8x16_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u16 (uint16x8_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_p16_u32 (uint32x4_t __a)
+{
+ return (poly16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p8 (poly8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p16 (poly16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p64 (poly64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_p128 (poly128_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s64 (int64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u64 (uint64x2_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s8 (int8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s16 (int16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_s32 (int32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si (__a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u8 (uint8x16_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u16 (uint16x8_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_f32_u32 (uint32x4_t __a)
+{
+ return (float32x4_t)__builtin_neon_vreinterpretv4sfv4si ((int32x4_t) __a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p8 (poly8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p16 (poly16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_f32 (float32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_p128 (poly128_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s64 (int64x2_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u64 (uint64x2_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s8 (int8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s16 (int16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_s32 (int32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u8 (uint8x16_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u16 (uint16x8_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_p64_u32 (uint32x4_t __a)
+{
+ return (poly64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p8 (poly8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p16 (poly16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_f32 (float32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4sf (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_p64 (poly64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s64 (int64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u64 (uint64x2_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s8 (int8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s16 (int16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_s32 (int32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4si (__a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u8 (uint8x16_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv16qi ((int8x16_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u16 (uint16x8_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv8hi ((int16x8_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vreinterpretq_p128_u32 (uint32x4_t __a)
+{
+ return (poly128_t)__builtin_neon_vreinterprettiv4si ((int32x4_t) __a);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p8 (poly8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p16 (poly16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_f32 (float32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p64 (poly64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_p128 (poly128_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u64 (uint64x2_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s8 (int8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s16 (int16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_s32 (int32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u8 (uint8x16_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u16 (uint16x8_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_s64_u32 (uint32x4_t __a)
+{
+ return (int64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p8 (poly8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p16 (poly16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_f32 (float32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p64 (poly64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_p128 (poly128_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2diti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s64 (int64x2_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div2di (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s8 (int8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s16 (int16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_s32 (int32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si (__a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u8 (uint8x16_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u16 (uint16x8_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vreinterpretq_u64_u32 (uint32x4_t __a)
+{
+ return (uint64x2_t)__builtin_neon_vreinterpretv2div4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p8 (poly8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p16 (poly16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_f32 (float32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p64 (poly64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_p128 (poly128_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s64 (int64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u64 (uint64x2_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s16 (int16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_s32 (int32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u8 (uint8x16_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u16 (uint16x8_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_s8_u32 (uint32x4_t __a)
+{
+ return (int8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p8 (poly8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p16 (poly16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_f32 (float32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p64 (poly64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_p128 (poly128_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s64 (int64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u64 (uint64x2_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s8 (int8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_s32 (int32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u8 (uint8x16_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u16 (uint16x8_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_s16_u32 (uint32x4_t __a)
+{
+ return (int16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p8 (poly8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p16 (poly16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_f32 (float32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p64 (poly64x2_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_p128 (poly128_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s64 (int64x2_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u64 (uint64x2_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s8 (int8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_s16 (int16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u8 (uint8x16_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u16 (uint16x8_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_s32_u32 (uint32x4_t __a)
+{
+ return (int32x4_t)__builtin_neon_vreinterpretv4siv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p8 (poly8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p16 (poly16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_f32 (float32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p64 (poly64x2_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_p128 (poly128_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s64 (int64x2_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u64 (uint64x2_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s8 (int8x16_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv16qi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s16 (int16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_s32 (int32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si (__a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u16 (uint16x8_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vreinterpretq_u8_u32 (uint32x4_t __a)
+{
+ return (uint8x16_t)__builtin_neon_vreinterpretv16qiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p8 (poly8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p16 (poly16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_f32 (float32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p64 (poly64x2_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_p128 (poly128_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s64 (int64x2_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u64 (uint64x2_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s8 (int8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s16 (int16x8_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv8hi (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_s32 (int32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si (__a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u8 (uint8x16_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vreinterpretq_u16_u32 (uint32x4_t __a)
+{
+ return (uint16x8_t)__builtin_neon_vreinterpretv8hiv4si ((int32x4_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p8 (poly8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p16 (poly16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_f32 (float32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4sf (__a);
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p64 (poly64x2_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_p128 (poly128_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siti ((__builtin_neon_ti) __a);
+}
+
+#endif
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s64 (int64x2_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u64 (uint64x2_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv2di ((int64x2_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s8 (int8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s16 (int16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_s32 (int32x4_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv4si (__a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u8 (uint8x16_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv16qi ((int8x16_t) __a);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vreinterpretq_u32_u16 (uint16x8_t __a)
+{
+ return (uint32x4_t)__builtin_neon_vreinterpretv4siv8hi ((int16x8_t) __a);
+}
+
+
+#ifdef __ARM_FEATURE_CRYPTO
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vldrq_p128 (poly128_t const * __ptr)
+{
+#ifdef __ARM_BIG_ENDIAN
+ poly64_t* __ptmp = (poly64_t*) __ptr;
+ poly64_t __d0 = vld1_p64 (__ptmp);
+ poly64_t __d1 = vld1_p64 (__ptmp + 1);
+ return vreinterpretq_p128_p64 (vcombine_p64 (__d1, __d0));
+#else
+ return vreinterpretq_p128_p64 (vld1q_p64 ((poly64_t*) __ptr));
+#endif
+}
+
+__extension__ static __inline void __attribute__ ((__always_inline__))
+vstrq_p128 (poly128_t * __ptr, poly128_t __val)
+{
+#ifdef __ARM_BIG_ENDIAN
+ poly64x2_t __tmp = vreinterpretq_p64_p128 (__val);
+ poly64_t __d0 = vget_high_p64 (__tmp);
+ poly64_t __d1 = vget_low_p64 (__tmp);
+ vst1q_p64 ((poly64_t*) __ptr, vcombine_p64 (__d0, __d1));
+#else
+ vst1q_p64 ((poly64_t*) __ptr, vreinterpretq_p64_p128 (__val));
+#endif
+}
+
+/* The vceq_p64 intrinsic does not map to a single instruction.
+ Instead we emulate it by performing a 32-bit vceq_u32 comparison
+ and applying a pairwise min reduction to the result.
+ vceq_u32 produces two 32-bit halves, each of which is either
+ all ones or all zeros depending on whether the corresponding 32-bit
+ halves of the poly64_t values were equal. The whole poly64_t values are
+ equal if and only if both halves are equal, i.e. vceq_u32 returns all ones.
+ If either half of the comparison is all zeros, the whole result must be
+ all zeros, which is exactly what the pairwise min reduction achieves. */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vceq_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmin_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
+
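+/* A minimal usage sketch (illustrative only, not part of the upstream
+ header; the helper name is hypothetical): collapse the all-ones /
+ all-zeros mask produced by vceq_p64 into a scalar truth value.  */
+__extension__ static __inline int __attribute__ ((__always_inline__))
+__neon_example_p64_equal (poly64x1_t __a, poly64x1_t __b)
+{
+ /* vceq_p64 yields all ones when the two values are equal.  */
+ return vget_lane_u64 (vceq_p64 (__a, __b), 0) != 0;
+}
+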
+/* The vtst_p64 intrinsic does not map to a single instruction.
+ We emulate it in a way similar to vceq_p64 above, but here we do
+ a reduction with max, since if any pair of corresponding bits in the
+ two poly64_t values is set in both, the whole result must be all ones. */
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vtst_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ uint32x2_t __t_a = vreinterpret_u32_p64 (__a);
+ uint32x2_t __t_b = vreinterpret_u32_p64 (__b);
+ uint32x2_t __c = vtst_u32 (__t_a, __t_b);
+ uint32x2_t __m = vpmax_u32 (__c, __c);
+ return vreinterpret_u64_u32 (__m);
+}
+
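+/* A companion sketch (illustrative only, helper name hypothetical):
+ vtst_p64 answers "do the two values share at least one set bit?".  */
+__extension__ static __inline int __attribute__ ((__always_inline__))
+__neon_example_p64_overlap (poly64x1_t __a, poly64x1_t __b)
+{
+ /* All ones when (__a & __b) is non-zero, all zeros otherwise.  */
+ return vget_lane_u64 (vtst_p64 (__a, __b), 0) != 0;
+}
+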
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaeseq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aese (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesdq_u8 (uint8x16_t __data, uint8x16_t __key)
+{
+ return __builtin_arm_crypto_aesd (__data, __key);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesmcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesmc (__data);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vaesimcq_u8 (uint8x16_t __data)
+{
+ return __builtin_arm_crypto_aesimc (__data);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vsha1h_u32 (uint32_t __hash_e)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ __t = __builtin_arm_crypto_sha1h (__t);
+ return vgetq_lane_u32 (__t, 0);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1c (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1p (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
+{
+ uint32x4_t __t = vdupq_n_u32 (0);
+ __t = vsetq_lane_u32 (__hash_e, __t, 0);
+ return __builtin_arm_crypto_sha1m (__hash_abcd, __t, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
+{
+ return __builtin_arm_crypto_sha1su0 (__w0_3, __w4_7, __w8_11);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha1su1 (__tw0_3, __w12_15);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256h2q_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
+{
+ return __builtin_arm_crypto_sha256h2 (__hash_abcd, __hash_efgh, __wk);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7)
+{
+ return __builtin_arm_crypto_sha256su0 (__w0_3, __w4_7);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
+{
+ return __builtin_arm_crypto_sha256su1 (__tw0_3, __w8_11, __w12_15);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_p64 (poly64_t __a, poly64_t __b)
+{
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __a, (uint64_t) __b);
+}
+
+__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
+{
+ poly64_t __t1 = vget_high_p64 (__a);
+ poly64_t __t2 = vget_high_p64 (__b);
+
+ return (poly128_t) __builtin_arm_crypto_vmullp64 ((uint64_t) __t1, (uint64_t) __t2);
+}
+
+#endif
+#ifdef __cplusplus
+}
+#endif
+#endif
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/float.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/float.h
new file mode 100644
index 0000000..a8e05bf
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/float.h
@@ -0,0 +1,277 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 5.2.4.2.2 Characteristics of floating types <float.h>
+ */
+
+#ifndef _FLOAT_H___
+#define _FLOAT_H___
+
+/* Radix of exponent representation, b. */
+#undef FLT_RADIX
+#define FLT_RADIX __FLT_RADIX__
+
+/* Number of base-FLT_RADIX digits in the significand, p. */
+#undef FLT_MANT_DIG
+#undef DBL_MANT_DIG
+#undef LDBL_MANT_DIG
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+/* Number of decimal digits, q, such that any floating-point number with q
+ decimal digits can be rounded into a floating-point number with p radix b
+ digits and back again without change to the q decimal digits,
+
+ p * log10(b) if b is a power of 10
+ floor((p - 1) * log10(b)) otherwise
+*/
+#undef FLT_DIG
+#undef DBL_DIG
+#undef LDBL_DIG
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
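+
+/* Worked example (illustrative only): for IEEE binary32, p == 24 and
+ b == 2, so floor ((24 - 1) * log10 (2)) == floor (6.92...) == 6 and
+ FLT_DIG == 6; binary64 gives DBL_DIG == 15 by the same formula.  */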
+
+/* Minimum int x such that FLT_RADIX**(x-1) is a normalized float, emin */
+#undef FLT_MIN_EXP
+#undef DBL_MIN_EXP
+#undef LDBL_MIN_EXP
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+/* Minimum negative integer such that 10 raised to that power is in the
+ range of normalized floating-point numbers,
+
+ ceil(log10(b) * (emin - 1))
+*/
+#undef FLT_MIN_10_EXP
+#undef DBL_MIN_10_EXP
+#undef LDBL_MIN_10_EXP
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+/* Maximum int x such that FLT_RADIX**(x-1) is a representable float, emax. */
+#undef FLT_MAX_EXP
+#undef DBL_MAX_EXP
+#undef LDBL_MAX_EXP
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+/* Maximum integer such that 10 raised to that power is in the range of
+ representable finite floating-point numbers,
+
+ floor(log10((1 - b**-p) * b**emax))
+*/
+#undef FLT_MAX_10_EXP
+#undef DBL_MAX_10_EXP
+#undef LDBL_MAX_10_EXP
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
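+
+/* Worked example (illustrative only): for IEEE binary32,
+ floor (log10 ((1 - 2**-24) * 2**128)) == floor (38.53...) == 38,
+ so FLT_MAX_10_EXP == 38.  */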
+
+/* Maximum representable finite floating-point number,
+
+ (1 - b**-p) * b**emax
+*/
+#undef FLT_MAX
+#undef DBL_MAX
+#undef LDBL_MAX
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+/* The difference between 1 and the least value greater than 1 that is
+ representable in the given floating point type, b**(1-p). */
+#undef FLT_EPSILON
+#undef DBL_EPSILON
+#undef LDBL_EPSILON
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+/* Minimum normalized positive floating-point number, b**(emin - 1). */
+#undef FLT_MIN
+#undef DBL_MIN
+#undef LDBL_MIN
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+/* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown. */
+/* ??? This is supposed to change with calls to fesetround in <fenv.h>. */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+/* The floating-point expression evaluation method.
+ -1 indeterminate
+ 0 evaluate all operations and constants just to the range and
+ precision of the type
+ 1 evaluate operations and constants of type float and double
+ to the range and precision of the double type, evaluate
+ long double operations and constants to the range and
+ precision of the long double type
+ 2 evaluate all operations and constants to the range and
+ precision of the long double type
+
+ ??? This ought to change with the setting of the fp control word;
+ the value provided by the compiler assumes the widest setting. */
+#undef FLT_EVAL_METHOD
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+
+/* Number of decimal digits, n, such that any floating-point number in the
+ widest supported floating type with pmax radix b digits can be rounded
+ to a floating-point number with n decimal digits and back again without
+ change to the value,
+
+ pmax * log10(b) if b is a power of 10
+ ceil(1 + pmax * log10(b)) otherwise
+*/
+#undef DECIMAL_DIG
+#define DECIMAL_DIG __DECIMAL_DIG__
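+
+/* Worked example (illustrative only, assuming the widest supported type
+ is IEEE binary64, as on targets where long double has the same format
+ as double): pmax == 53 and b == 2, so ceil (1 + 53 * log10 (2)) ==
+ ceil (16.95...) == 17, hence DECIMAL_DIG == 17.  */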
+
+#endif /* C99 */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+/* Versions of DECIMAL_DIG for each floating-point type. */
+#undef FLT_DECIMAL_DIG
+#undef DBL_DECIMAL_DIG
+#undef LDBL_DECIMAL_DIG
+#define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
+#define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
+#define LDBL_DECIMAL_DIG __DECIMAL_DIG__
+
+/* Whether types support subnormal numbers. */
+#undef FLT_HAS_SUBNORM
+#undef DBL_HAS_SUBNORM
+#undef LDBL_HAS_SUBNORM
+#define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+#define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+#define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
+
+/* Minimum positive values, including subnormals. */
+#undef FLT_TRUE_MIN
+#undef DBL_TRUE_MIN
+#undef LDBL_TRUE_MIN
+#if __FLT_HAS_DENORM__
+#define FLT_TRUE_MIN __FLT_DENORM_MIN__
+#else
+#define FLT_TRUE_MIN __FLT_MIN__
+#endif
+#if __DBL_HAS_DENORM__
+#define DBL_TRUE_MIN __DBL_DENORM_MIN__
+#else
+#define DBL_TRUE_MIN __DBL_MIN__
+#endif
+#if __LDBL_HAS_DENORM__
+#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#else
+#define LDBL_TRUE_MIN __LDBL_MIN__
+#endif
+
+#endif /* C11 */
+
+#ifdef __STDC_WANT_DEC_FP__
+/* Draft Technical Report 24732, extension for decimal floating-point
+ arithmetic: Characteristics of decimal floating types <float.h>. */
+
+/* Number of base-FLT_RADIX digits in the significand, p. */
+#undef DEC32_MANT_DIG
+#undef DEC64_MANT_DIG
+#undef DEC128_MANT_DIG
+#define DEC32_MANT_DIG __DEC32_MANT_DIG__
+#define DEC64_MANT_DIG __DEC64_MANT_DIG__
+#define DEC128_MANT_DIG __DEC128_MANT_DIG__
+
+/* Minimum exponent. */
+#undef DEC32_MIN_EXP
+#undef DEC64_MIN_EXP
+#undef DEC128_MIN_EXP
+#define DEC32_MIN_EXP __DEC32_MIN_EXP__
+#define DEC64_MIN_EXP __DEC64_MIN_EXP__
+#define DEC128_MIN_EXP __DEC128_MIN_EXP__
+
+/* Maximum exponent. */
+#undef DEC32_MAX_EXP
+#undef DEC64_MAX_EXP
+#undef DEC128_MAX_EXP
+#define DEC32_MAX_EXP __DEC32_MAX_EXP__
+#define DEC64_MAX_EXP __DEC64_MAX_EXP__
+#define DEC128_MAX_EXP __DEC128_MAX_EXP__
+
+/* Maximum representable finite decimal floating-point number
+ (there are 6, 15, and 33 9s after the decimal points respectively). */
+#undef DEC32_MAX
+#undef DEC64_MAX
+#undef DEC128_MAX
+#define DEC32_MAX __DEC32_MAX__
+#define DEC64_MAX __DEC64_MAX__
+#define DEC128_MAX __DEC128_MAX__
+
+/* The difference between 1 and the least value greater than 1 that is
+ representable in the given floating point type. */
+#undef DEC32_EPSILON
+#undef DEC64_EPSILON
+#undef DEC128_EPSILON
+#define DEC32_EPSILON __DEC32_EPSILON__
+#define DEC64_EPSILON __DEC64_EPSILON__
+#define DEC128_EPSILON __DEC128_EPSILON__
+
+/* Minimum normalized positive floating-point number. */
+#undef DEC32_MIN
+#undef DEC64_MIN
+#undef DEC128_MIN
+#define DEC32_MIN __DEC32_MIN__
+#define DEC64_MIN __DEC64_MIN__
+#define DEC128_MIN __DEC128_MIN__
+
+/* Minimum subnormal positive floating-point number. */
+#undef DEC32_SUBNORMAL_MIN
+#undef DEC64_SUBNORMAL_MIN
+#undef DEC128_SUBNORMAL_MIN
+#define DEC32_SUBNORMAL_MIN __DEC32_SUBNORMAL_MIN__
+#define DEC64_SUBNORMAL_MIN __DEC64_SUBNORMAL_MIN__
+#define DEC128_SUBNORMAL_MIN __DEC128_SUBNORMAL_MIN__
+
+/* The floating-point expression evaluation method.
+ -1 indeterminate
+ 0 evaluate all operations and constants just to the range and
+ precision of the type
+ 1 evaluate operations and constants of type _Decimal32
+ and _Decimal64 to the range and precision of the _Decimal64
+ type, evaluate _Decimal128 operations and constants to the
+ range and precision of the _Decimal128 type;
+ 2 evaluate all operations and constants to the range and
+ precision of the _Decimal128 type. */
+
+#undef DEC_EVAL_METHOD
+#define DEC_EVAL_METHOD __DEC_EVAL_METHOD__
+
+#endif /* __STDC_WANT_DEC_FP__ */
+
+#endif /* _FLOAT_H___ */
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/iso646.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/iso646.h
new file mode 100644
index 0000000..89bc8f4
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/iso646.h
@@ -0,0 +1,45 @@
+/* Copyright (C) 1997-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.9 Alternative spellings <iso646.h>
+ */
+
+#ifndef _ISO646_H
+#define _ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
+
+#endif
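For reference, a minimal usage sketch of the iso646.h added above (not part of the committed file): in C the header just spells the logical and bitwise operators with the keywords that C++ has built in.

#include <iso646.h>
#include <stdio.h>

int main (void)
{
  int a = 5, b = 12;

  /* `and`, `not` and `bitand` expand to &&, ! and &.  */
  if (a and not (b bitand 1))
    printf ("a is nonzero and b is even\n");
  return 0;
}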
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/mmintrin.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/mmintrin.h
new file mode 100644
index 0000000..b906fac
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/mmintrin.h
@@ -0,0 +1,1836 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+#ifndef __IWMMXT__
+#error mmintrin.h included without enabling WMMX/WMMX2 instructions (e.g. -march=iwmmxt or -march=iwmmxt2)
+#endif
+
+
+#if defined __cplusplus
+extern "C" {
+/* Intrinsics use C name-mangling. */
+#endif /* __cplusplus */
+
+/* The data type intended for user use. */
+typedef unsigned long long __m64, __int64;
+
+/* Internal data types for implementing the intrinsics. */
+typedef int __v2si __attribute__ ((vector_size (8)));
+typedef short __v4hi __attribute__ ((vector_size (8)));
+typedef signed char __v8qi __attribute__ ((vector_size (8)));
+
+/* Provided for source compatibility with MMX. */
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_empty (void)
+{
+}
+
+/* "Convert" __m64 and __int64 into each other. */
+static __inline __m64
+_mm_cvtsi64_m64 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtm64_si64 (__m64 __i)
+{
+ return __i;
+}
+
+static __inline int
+_mm_cvtsi64_si32 (__int64 __i)
+{
+ return __i;
+}
+
+static __inline __int64
+_mm_cvtsi32_si64 (int __i)
+{
+ return (__i & 0xffffffff);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with signed saturation. */
+static __inline __m64
+_mm_packs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
+ the 64-bit value from M2 into the upper 32-bits of the result, all with
+ signed saturation for values that do not fit exactly into 32-bits. */
+static __inline __m64
+_mm_packs_pi64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdss ((long long)__m1, (long long)__m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+ the result, and the four 16-bit values from M2 into the upper four 8-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+ the result, and the two 32-bit values from M2 into the upper two 16-bit
+ values of the result, all with unsigned saturation. */
+static __inline __m64
+_mm_packs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Copy the 64-bit value from M1 into the lower 32-bits of the result, and
+ the 64-bit value from M2 into the upper 32-bits of the result, all with
+ unsigned saturation for values that do not fit exactly into 32-bits. */
+static __inline __m64
+_mm_packs_pu64 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wpackdus ((long long)__m1, (long long)__m2);
+}
+
+/* Interleave the four 8-bit values from the high half of M1 with the four
+ 8-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+ 16-bit values from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+ value from the high half of M2. */
+static __inline __m64
+_mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckihw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Interleave the four 8-bit values from the low half of M1 with the four
+ 8-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Interleave the two 16-bit values from the low half of M1 with the two
+ 16-bit values from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+ value from the low half of M2. */
+static __inline __m64
+_mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wunpckilw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Take the four 8-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsb ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, sign extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pi16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it sign extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pi32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehsw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckelub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the low half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackel_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the low half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackel_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckeluw ((__v2si)__m1);
+}
+
+/* Take the four 8-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of four 16-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu8 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehub ((__v8qi)__m1);
+}
+
+/* Take the two 16-bit values from the high half of M1, zero extend them,
+ and return the result as a vector of two 32-bit quantities. */
+static __inline __m64
+_mm_unpackeh_pu16 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuh ((__v4hi)__m1);
+}
+
+/* Take the 32-bit value from the high half of M1, and return it zero extended
+ to 64 bits. */
+static __inline __m64
+_mm_unpackeh_pu32 (__m64 __m1)
+{
+ return (__m64) __builtin_arm_wunpckehuw ((__v2si)__m1);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+static __inline __m64
+_mm_add_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+static __inline __m64
+_mm_add_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+static __inline __m64
+_mm_add_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using signed
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2 using unsigned
+ saturated arithmetic. */
+static __inline __m64
+_mm_adds_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_waddwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+static __inline __m64
+_mm_sub_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+static __inline __m64
+_mm_sub_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+static __inline __m64
+_mm_sub_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+ saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbss ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhss ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ signed saturating arithmetic. */
+static __inline __m64
+_mm_subs_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwss ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubbus ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubhus ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1 using
+ unsigned saturating arithmetic. */
+static __inline __m64
+_mm_subs_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wsubwus ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+ four 32-bit intermediate results, which are then summed by pairs to
+ produce two 32-bit results. */
+static __inline __m64
+_mm_madd_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmadds ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values
+   in M2 producing four 32-bit intermediate results, which are then summed by
+   pairs to produce two 32-bit results.  */
+static __inline __m64
+_mm_madd_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmaddu ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+ M2 and produce the high 16 bits of the 32-bit results. */
+static __inline __m64
+_mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulsm ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four unsigned 16-bit values in M1 by four unsigned 16-bit values
+   in M2 and produce the high 16 bits of the 32-bit results.  */
+static __inline __m64
+_mm_mulhi_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulum ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+ the low 16 bits of the results. */
+static __inline __m64
+_mm_mullo_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wmulul ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Shift four 16-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+static __inline __m64
+_mm_sll_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsllw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_slli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsllwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+static __inline __m64
+_mm_sll_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wslld (__m, __count);
+}
+
+static __inline __m64
+_mm_slli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wslldi (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrah ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrahi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsraw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srai_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrawi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in the sign bit. */
+static __inline __m64
+_mm_sra_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrad (__m, __count);
+}
+
+static __inline __m64
+_mm_srai_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsradi (__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlhi ((__v4hi)__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+static __inline __m64
+_mm_srl_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrlw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_srli_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrlwi ((__v2si)__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros.  */
+static __inline __m64
+_mm_srl_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wsrld (__m, __count);
+}
+
+static __inline __m64
+_mm_srli_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wsrldi (__m, __count);
+}
+
+/* Rotate four 16-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi16 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorh ((__v4hi)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi16 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorhi ((__v4hi)__m, __count);
+}
+
+/* Rotate two 32-bit values in M right by COUNT. */
+static __inline __m64
+_mm_ror_pi32 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrorw ((__v2si)__m, __count);
+}
+
+static __inline __m64
+_mm_rori_pi32 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrorwi ((__v2si)__m, __count);
+}
+
+/* Rotate the 64-bit value in M right by COUNT.  */
+static __inline __m64
+_mm_ror_si64 (__m64 __m, __m64 __count)
+{
+ return (__m64) __builtin_arm_wrord (__m, __count);
+}
+
+static __inline __m64
+_mm_rori_si64 (__m64 __m, int __count)
+{
+ return (__m64) __builtin_arm_wrordi (__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_and_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wand (__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+ 64-bit value in M2. */
+static __inline __m64
+_mm_andnot_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wandn (__m2, __m1);
+}
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_or_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wor (__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+static __inline __m64
+_mm_xor_si64 (__m64 __m1, __m64 __m2)
+{
+ return __builtin_arm_wxor (__m1, __m2);
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+ test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsb ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu8 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtub ((__v8qi)__m1, (__v8qi)__m2);
+}
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu16 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuh ((__v4hi)__m1, (__v4hi)__m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+ the test is true and zero if false. */
+static __inline __m64
+_mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpeqw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtsw ((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline __m64
+_mm_cmpgt_pu32 (__m64 __m1, __m64 __m2)
+{
+ return (__m64) __builtin_arm_wcmpgtuw ((__v2si)__m1, (__v2si)__m2);
+}
+
+/* Element-wise multiplication of unsigned 16-bit values __B and __C, followed
+ by accumulate across all elements and __A. */
+static __inline __m64
+_mm_mac_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacu (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of signed 16-bit values __B and __C, followed
+ by accumulate across all elements and __A. */
+static __inline __m64
+_mm_mac_pi16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return __builtin_arm_wmacs (__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+/* Element-wise multiplication of unsigned 16-bit values __A and __B, followed
+   by accumulate across all elements.  */
+static __inline __m64
+_mm_macz_pu16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacuz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Element-wise multiplication of signed 16-bit values __A and __B, followed
+   by accumulate across all elements.  */
+static __inline __m64
+_mm_macz_pi16 (__m64 __A, __m64 __B)
+{
+ return __builtin_arm_wmacsz ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Accumulate across all unsigned 8-bit values in __A. */
+static __inline __m64
+_mm_acc_pu8 (__m64 __A)
+{
+ return __builtin_arm_waccb ((__v8qi)__A);
+}
+
+/* Accumulate across all unsigned 16-bit values in __A. */
+static __inline __m64
+_mm_acc_pu16 (__m64 __A)
+{
+ return __builtin_arm_wacch ((__v4hi)__A);
+}
+
+/* Accumulate across all unsigned 32-bit values in __A. */
+static __inline __m64
+_mm_acc_pu32 (__m64 __A)
+{
+ return __builtin_arm_waccw ((__v2si)__A);
+}
+
+static __inline __m64
+_mm_mia_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmia (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miaph_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiaph (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miabt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiabt (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatb_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatb (__A, __B, __C);
+}
+
+static __inline __m64
+_mm_miatt_si64 (__m64 __A, int __B, int __C)
+{
+ return __builtin_arm_tmiatt (__A, __B, __C);
+}
+
+/* Extract one of the elements of A and sign extend. The selector N must
+ be immediate. */
+#define _mm_extract_pi8(A, N) __builtin_arm_textrmsb ((__v8qi)(A), (N))
+#define _mm_extract_pi16(A, N) __builtin_arm_textrmsh ((__v4hi)(A), (N))
+#define _mm_extract_pi32(A, N) __builtin_arm_textrmsw ((__v2si)(A), (N))
+
+/* Extract one of the elements of A and zero extend. The selector N must
+ be immediate. */
+#define _mm_extract_pu8(A, N) __builtin_arm_textrmub ((__v8qi)(A), (N))
+#define _mm_extract_pu16(A, N) __builtin_arm_textrmuh ((__v4hi)(A), (N))
+#define _mm_extract_pu32(A, N) __builtin_arm_textrmuw ((__v2si)(A), (N))
+
+/* Inserts word D into one of the elements of A. The selector N must be
+ immediate. */
+#define _mm_insert_pi8(A, D, N) \
+ ((__m64) __builtin_arm_tinsrb ((__v8qi)(A), (D), (N)))
+#define _mm_insert_pi16(A, D, N) \
+ ((__m64) __builtin_arm_tinsrh ((__v4hi)(A), (D), (N)))
+#define _mm_insert_pi32(A, D, N) \
+ ((__m64) __builtin_arm_tinsrw ((__v2si)(A), (D), (N)))
+
+/* Compute the element-wise maximum of signed 8-bit values. */
+static __inline __m64
+_mm_max_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of signed 16-bit values. */
+static __inline __m64
+_mm_max_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of signed 32-bit values. */
+static __inline __m64
+_mm_max_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 8-bit values. */
+static __inline __m64
+_mm_max_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 16-bit values. */
+static __inline __m64
+_mm_max_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise maximum of unsigned 32-bit values. */
+static __inline __m64
+_mm_max_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wmaxuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of signed 8-bit values.  */
+static __inline __m64
+_mm_min_pi8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsb ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of signed 16-bit values. */
+static __inline __m64
+_mm_min_pi16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of signed 32-bit values. */
+static __inline __m64
+_mm_min_pi32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminsw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 8-bit values.  */
+static __inline __m64
+_mm_min_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminub ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 16-bit values. */
+static __inline __m64
+_mm_min_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuh ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the element-wise minimum of unsigned 32-bit values. */
+static __inline __m64
+_mm_min_pu32 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wminuw ((__v2si)__A, (__v2si)__B);
+}
+
+/* Create an 8-bit mask of the signs of 8-bit values. */
+static __inline int
+_mm_movemask_pi8 (__m64 __A)
+{
+ return __builtin_arm_tmovmskb ((__v8qi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 16-bit values. */
+static __inline int
+_mm_movemask_pi16 (__m64 __A)
+{
+ return __builtin_arm_tmovmskh ((__v4hi)__A);
+}
+
+/* Create an 8-bit mask of the signs of 32-bit values. */
+static __inline int
+_mm_movemask_pi32 (__m64 __A)
+{
+ return __builtin_arm_tmovmskw ((__v2si)__A);
+}
+
+/* Return a combination of the four 16-bit values in A. The selector
+ must be an immediate. */
+#define _mm_shuffle_pi16(A, N) \
+ ((__m64) __builtin_arm_wshufh ((__v4hi)(A), (N)))
+
+
+/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2br ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2hr ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the averages of the unsigned 8-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2b ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the averages of the unsigned 16-bit values in A and B. */
+static __inline __m64
+_mm_avg2_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wavg2h ((__v4hi)__A, (__v4hi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+static __inline __m64
+_mm_sada_pu8 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return (__m64) __builtin_arm_wsadb ((__v2si)__A, (__v8qi)__B, (__v8qi)__C);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sad_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
+static __inline __m64
+_mm_sada_pu16 (__m64 __A, __m64 __B, __m64 __C)
+{
+ return (__m64) __builtin_arm_wsadh ((__v2si)__A, (__v4hi)__B, (__v4hi)__C);
+}
+
+
+/* Compute the sum of the absolute differences of the unsigned 8-bit
+ values in A and B. Return the value in the lower 16-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu8 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadbz ((__v8qi)__A, (__v8qi)__B);
+}
+
+/* Compute the sum of the absolute differences of the unsigned 16-bit
+ values in A and B. Return the value in the lower 32-bit word; the
+ upper words are cleared. */
+static __inline __m64
+_mm_sadz_pu16 (__m64 __A, __m64 __B)
+{
+ return (__m64) __builtin_arm_wsadhz ((__v4hi)__A, (__v4hi)__B);
+}
+
+#define _mm_align_si64(__A,__B, N) \
+ (__m64) __builtin_arm_walign ((__v8qi) (__A),(__v8qi) (__B), (N))
+
+/* Creates a 64-bit zero. */
+static __inline __m64
+_mm_setzero_si64 (void)
+{
+ return __builtin_arm_wzero ();
+}
+
+/* Set and Get arbitrary iWMMXt Control registers.
+ Note only registers 0-3 and 8-11 are currently defined,
+ the rest are reserved. */
+
+static __inline void
+_mm_setwcx (const int __value, const int __regno)
+{
+ switch (__regno)
+ {
+ case 0:
+ __asm __volatile ("tmcr wcid, %0" :: "r"(__value));
+ break;
+ case 1:
+ __asm __volatile ("tmcr wcon, %0" :: "r"(__value));
+ break;
+ case 2:
+ __asm __volatile ("tmcr wcssf, %0" :: "r"(__value));
+ break;
+ case 3:
+ __asm __volatile ("tmcr wcasf, %0" :: "r"(__value));
+ break;
+ case 8:
+ __builtin_arm_setwcgr0 (__value);
+ break;
+ case 9:
+ __builtin_arm_setwcgr1 (__value);
+ break;
+ case 10:
+ __builtin_arm_setwcgr2 (__value);
+ break;
+ case 11:
+ __builtin_arm_setwcgr3 (__value);
+ break;
+ default:
+ break;
+ }
+}
+
+static __inline int
+_mm_getwcx (const int __regno)
+{
+ int __value;
+ switch (__regno)
+ {
+ case 0:
+ __asm __volatile ("tmrc %0, wcid" : "=r"(__value));
+ break;
+ case 1:
+ __asm __volatile ("tmrc %0, wcon" : "=r"(__value));
+ break;
+ case 2:
+ __asm __volatile ("tmrc %0, wcssf" : "=r"(__value));
+ break;
+ case 3:
+ __asm __volatile ("tmrc %0, wcasf" : "=r"(__value));
+ break;
+ case 8:
+ return __builtin_arm_getwcgr0 ();
+ case 9:
+ return __builtin_arm_getwcgr1 ();
+ case 10:
+ return __builtin_arm_getwcgr2 ();
+ case 11:
+ return __builtin_arm_getwcgr3 ();
+ default:
+ break;
+ }
+ return __value;
+}
+
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+static __inline __m64
+_mm_set_pi32 (int __i1, int __i0)
+{
+ union
+ {
+ __m64 __q;
+ struct
+ {
+ unsigned int __i0;
+ unsigned int __i1;
+ } __s;
+ } __u;
+
+ __u.__s.__i0 = __i0;
+ __u.__s.__i1 = __i1;
+
+ return __u.__q;
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+static __inline __m64
+_mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
+{
+ unsigned int __i1 = (unsigned short) __w3 << 16 | (unsigned short) __w2;
+ unsigned int __i0 = (unsigned short) __w1 << 16 | (unsigned short) __w0;
+
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+static __inline __m64
+_mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
+ char __b3, char __b2, char __b1, char __b0)
+{
+ unsigned int __i1, __i0;
+
+ __i1 = (unsigned char)__b7;
+ __i1 = __i1 << 8 | (unsigned char)__b6;
+ __i1 = __i1 << 8 | (unsigned char)__b5;
+ __i1 = __i1 << 8 | (unsigned char)__b4;
+
+ __i0 = (unsigned char)__b3;
+ __i0 = __i0 << 8 | (unsigned char)__b2;
+ __i0 = __i0 << 8 | (unsigned char)__b1;
+ __i0 = __i0 << 8 | (unsigned char)__b0;
+
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+/* Similar, but with the arguments in reverse order. */
+static __inline __m64
+_mm_setr_pi32 (int __i0, int __i1)
+{
+ return _mm_set_pi32 (__i1, __i0);
+}
+
+static __inline __m64
+_mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16 (__w3, __w2, __w1, __w0);
+}
+
+static __inline __m64
+_mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
+ char __b4, char __b5, char __b6, char __b7)
+{
+ return _mm_set_pi8 (__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+static __inline __m64
+_mm_set1_pi32 (int __i)
+{
+ return _mm_set_pi32 (__i, __i);
+}
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+static __inline __m64
+_mm_set1_pi16 (short __w)
+{
+ unsigned int __i = (unsigned short)__w << 16 | (unsigned short)__w;
+ return _mm_set1_pi32 (__i);
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B.  */
+static __inline __m64
+_mm_set1_pi8 (char __b)
+{
+ unsigned int __w = (unsigned char)__b << 8 | (unsigned char)__b;
+ unsigned int __i = __w << 16 | __w;
+ return _mm_set1_pi32 (__i);
+}
+
+#ifdef __IWMMXT2__
+static __inline __m64
+_mm_abs_pi8 (__m64 m1)
+{
+ return (__m64) __builtin_arm_wabsb ((__v8qi)m1);
+}
+
+static __inline __m64
+_mm_abs_pi16 (__m64 m1)
+{
+ return (__m64) __builtin_arm_wabsh ((__v4hi)m1);
+
+}
+
+static __inline __m64
+_mm_abs_pi32 (__m64 m1)
+{
+ return (__m64) __builtin_arm_wabsw ((__v2si)m1);
+
+}
+
+static __inline __m64
+_mm_addsubhx_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_waddsubhx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wabsdiffb ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wabsdiffh ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_absdiff_pu32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wabsdiffw ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_addc_pu16 (__m64 a, __m64 b)
+{
+ __m64 result;
+ __asm__ __volatile__ ("waddhc %0, %1, %2" : "=y" (result) : "y" (a), "y" (b));
+ return result;
+}
+
+static __inline __m64
+_mm_addc_pu32 (__m64 a, __m64 b)
+{
+ __m64 result;
+ __asm__ __volatile__ ("waddwc %0, %1, %2" : "=y" (result) : "y" (a), "y" (b));
+ return result;
+}
+
+static __inline __m64
+_mm_avg4_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wavg4 ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_avg4r_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wavg4r ((__v8qi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_maddx_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddsx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_maddx_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddux ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_msub_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddsn ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_msub_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmaddun ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhi_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwsm ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhi_pu32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwum ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhir_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulsmr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhir_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwsmr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mulhir_pu16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulumr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_mulhir_pu32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwumr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_mullo_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wmulwl ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_qmulm_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulm ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_qmulm_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulwm ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_qmulmr_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulmr ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_qmulmr_pi32 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wqmulwmr ((__v2si)a, (__v2si)b);
+}
+
+static __inline __m64
+_mm_subaddhx_pi16 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_wsubaddhx ((__v4hi)a, (__v4hi)b);
+}
+
+static __inline __m64
+_mm_addbhusl_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_waddbhusl ((__v4hi)a, (__v8qi)b);
+}
+
+static __inline __m64
+_mm_addbhusm_pu8 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_waddbhusm ((__v4hi)a, (__v8qi)b);
+}
+
+#define _mm_qmiabb_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiabb ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiabbn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiabbn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiabt_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiabt ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiabtn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc=acc;\
+ __m64 _m1=m1;\
+ __m64 _m2=m2;\
+ _acc = (__m64) __builtin_arm_wqmiabtn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiatb_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiatb ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiatbn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiatbn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiatt_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiatt ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_qmiattn_pi32(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wqmiattn ((__v2si)_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabb (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabbn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabt (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiabtn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiabtn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiatb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiatb (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiatbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiatbn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiatt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiatt (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiattn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiattn (_acc, (__v4hi)_m1, (__v4hi)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbb (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbbn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbt (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawbtn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawbtn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawtb_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawtb (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawtbn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawtbn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawtt_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawtt (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+#define _mm_wmiawttn_si64(acc, m1, m2) \
+ ({\
+ __m64 _acc = acc;\
+ __m64 _m1 = m1;\
+ __m64 _m2 = m2;\
+ _acc = (__m64) __builtin_arm_wmiawttn (_acc, (__v2si)_m1, (__v2si)_m2);\
+ _acc;\
+ })
+
+/* The third argument must be an immediate.  */
+#define _mm_merge_si64(a, b, n) \
+ ({\
+ __m64 result;\
+ result = (__m64) __builtin_arm_wmerge ((__m64) (a), (__m64) (b), (n));\
+ result;\
+ })
+#endif /* __IWMMXT2__ */
+
+static __inline __m64
+_mm_alignr0_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr0 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr1_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr1 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr2_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr2 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline __m64
+_mm_alignr3_si64 (__m64 a, __m64 b)
+{
+ return (__m64) __builtin_arm_walignr3 ((__v8qi) a, (__v8qi) b);
+}
+
+static __inline void
+_mm_tandcb ()
+{
+ __asm __volatile ("tandcb r15");
+}
+
+static __inline void
+_mm_tandch ()
+{
+ __asm __volatile ("tandch r15");
+}
+
+static __inline void
+_mm_tandcw ()
+{
+ __asm __volatile ("tandcw r15");
+}
+
+#define _mm_textrcb(n) \
+ ({\
+ __asm__ __volatile__ (\
+ "textrcb r15, %0" : : "i" (n));\
+ })
+
+#define _mm_textrch(n) \
+ ({\
+ __asm__ __volatile__ (\
+ "textrch r15, %0" : : "i" (n));\
+ })
+
+#define _mm_textrcw(n) \
+ ({\
+ __asm__ __volatile__ (\
+ "textrcw r15, %0" : : "i" (n));\
+ })
+
+static __inline void
+_mm_torcb ()
+{
+ __asm __volatile ("torcb r15");
+}
+
+static __inline void
+_mm_torch ()
+{
+ __asm __volatile ("torch r15");
+}
+
+static __inline void
+_mm_torcw ()
+{
+ __asm __volatile ("torcw r15");
+}
+
+#ifdef __IWMMXT2__
+static __inline void
+_mm_torvscb ()
+{
+ __asm __volatile ("torvscb r15");
+}
+
+static __inline void
+_mm_torvsch ()
+{
+ __asm __volatile ("torvsch r15");
+}
+
+static __inline void
+_mm_torvscw ()
+{
+ __asm __volatile ("torvscw r15");
+}
+#endif /* __IWMMXT2__ */
+
+static __inline __m64
+_mm_tbcst_pi8 (int value)
+{
+ return (__m64) __builtin_arm_tbcstb ((signed char) value);
+}
+
+static __inline __m64
+_mm_tbcst_pi16 (int value)
+{
+ return (__m64) __builtin_arm_tbcsth ((short) value);
+}
+
+static __inline __m64
+_mm_tbcst_pi32 (int value)
+{
+ return (__m64) __builtin_arm_tbcstw (value);
+}
+
+#define _m_empty _mm_empty
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_packusdw _mm_packs_pu32
+#define _m_packssqd _mm_packs_pi64
+#define _m_packusqd _mm_packs_pu64
+#define _mm_packs_si64 _mm_packs_pi64
+#define _mm_packs_su64 _mm_packs_pu64
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_punpckehsbw _mm_unpackeh_pi8
+#define _m_punpckehswd _mm_unpackeh_pi16
+#define _m_punpckehsdq _mm_unpackeh_pi32
+#define _m_punpckehubw _mm_unpackeh_pu8
+#define _m_punpckehuwd _mm_unpackeh_pu16
+#define _m_punpckehudq _mm_unpackeh_pu32
+#define _m_punpckelsbw _mm_unpackel_pi8
+#define _m_punpckelswd _mm_unpackel_pi16
+#define _m_punpckelsdq _mm_unpackel_pi32
+#define _m_punpckelubw _mm_unpackel_pu8
+#define _m_punpckeluwd _mm_unpackel_pu16
+#define _m_punpckeludq _mm_unpackel_pu32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddsd _mm_adds_pi32
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_paddusd _mm_adds_pu32
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubuw _mm_subs_pi32
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_psubusd _mm_subs_pu32
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmadduwd _mm_madd_pu16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_pmacsw _mm_mac_pi16
+#define _m_pmacuw _mm_mac_pu16
+#define _m_pmacszw _mm_macz_pi16
+#define _m_pmacuzw _mm_macz_pu16
+#define _m_paccb _mm_acc_pu8
+#define _m_paccw _mm_acc_pu16
+#define _m_paccd _mm_acc_pu32
+#define _m_pmia _mm_mia_si64
+#define _m_pmiaph _mm_miaph_si64
+#define _m_pmiabb _mm_miabb_si64
+#define _m_pmiabt _mm_miabt_si64
+#define _m_pmiatb _mm_miatb_si64
+#define _m_pmiatt _mm_miatt_si64
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psraq _mm_sra_si64
+#define _m_psraqi _mm_srai_si64
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_prorw _mm_ror_pi16
+#define _m_prorwi _mm_rori_pi16
+#define _m_prord _mm_ror_pi32
+#define _m_prordi _mm_rori_pi32
+#define _m_prorq _mm_ror_si64
+#define _m_prorqi _mm_rori_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtub _mm_cmpgt_pu8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtuw _mm_cmpgt_pu16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+#define _m_pcmpgtud _mm_cmpgt_pu32
+#define _m_pextrb _mm_extract_pi8
+#define _m_pextrw _mm_extract_pi16
+#define _m_pextrd _mm_extract_pi32
+#define _m_pextrub _mm_extract_pu8
+#define _m_pextruw _mm_extract_pu16
+#define _m_pextrud _mm_extract_pu32
+#define _m_pinsrb _mm_insert_pi8
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pinsrd _mm_insert_pi32
+#define _m_pmaxsb _mm_max_pi8
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxsd _mm_max_pi32
+#define _m_pmaxub _mm_max_pu8
+#define _m_pmaxuw _mm_max_pu16
+#define _m_pmaxud _mm_max_pu32
+#define _m_pminsb _mm_min_pi8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminsd _mm_min_pi32
+#define _m_pminub _mm_min_pu8
+#define _m_pminuw _mm_min_pu16
+#define _m_pminud _mm_min_pu32
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmovmskw _mm_movemask_pi16
+#define _m_pmovmskd _mm_movemask_pi32
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_pavg2b _mm_avg2_pu8
+#define _m_pavg2w _mm_avg2_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_psadwd _mm_sad_pu16
+#define _m_psadzbw _mm_sadz_pu8
+#define _m_psadzwd _mm_sadz_pu16
+#define _m_paligniq _mm_align_si64
+#define _m_cvt_si2pi _mm_cvtsi64_m64
+#define _m_cvt_pi2si _mm_cvtm64_si64
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_to_int _mm_cvtsi64_si32
+
+#if defined __cplusplus
+}; /* End "C" */
+#endif /* __cplusplus */
+
+#endif /* _MMINTRIN_H_INCLUDED */
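For reference, a minimal usage sketch of the mmintrin.h added above (not part of the committed file). This header maps the MMX-style intrinsics onto iWMMXt builtins, so it only compiles with -march=iwmmxt or -march=iwmmxt2 and only runs on a CPU that has the coprocessor; it is not usable on ordinary ARMv7 Android devices.

/* Build with: arm-linux-androideabi-gcc -march=iwmmxt2 demo.c */
#include <mmintrin.h>
#include <stdio.h>

int main (void)
{
  __m64 a = _mm_set_pi16 (30000, 3, 2, 1);    /* elements listed high to low */
  __m64 b = _mm_set_pi16 (30000, 30, 20, 10);

  /* Signed saturating add: 30000 + 30000 clamps to 32767.  */
  __m64 sum = _mm_adds_pi16 (a, b);

  printf ("%d %d\n",
          _mm_extract_pi16 (sum, 3),   /* 32767 */
          _mm_extract_pi16 (sum, 0));  /* 11 */
  return 0;
}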
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/omp.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/omp.h
new file mode 100644
index 0000000..b1824b5
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/omp.h
@@ -0,0 +1,127 @@
+/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU OpenMP Library (libgomp).
+
+ Libgomp is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _OMP_H
+#define _OMP_H 1
+
+#ifndef _LIBGOMP_OMP_LOCK_DEFINED
+#define _LIBGOMP_OMP_LOCK_DEFINED 1
+/* These two structures get edited by the libgomp build process to
+ reflect the shape of the two types. Their internals are private
+ to the library. */
+
+typedef struct
+{
+ unsigned char _x[4]
+ __attribute__((__aligned__(4)));
+} omp_lock_t;
+
+typedef struct
+{
+ unsigned char _x[12]
+ __attribute__((__aligned__(4)));
+} omp_nest_lock_t;
+#endif
+
+typedef enum omp_sched_t
+{
+ omp_sched_static = 1,
+ omp_sched_dynamic = 2,
+ omp_sched_guided = 3,
+ omp_sched_auto = 4
+} omp_sched_t;
+
+typedef enum omp_proc_bind_t
+{
+ omp_proc_bind_false = 0,
+ omp_proc_bind_true = 1,
+ omp_proc_bind_master = 2,
+ omp_proc_bind_close = 3,
+ omp_proc_bind_spread = 4
+} omp_proc_bind_t;
+
+#ifdef __cplusplus
+extern "C" {
+# define __GOMP_NOTHROW throw ()
+#else
+# define __GOMP_NOTHROW __attribute__((__nothrow__))
+#endif
+
+extern void omp_set_num_threads (int) __GOMP_NOTHROW;
+extern int omp_get_num_threads (void) __GOMP_NOTHROW;
+extern int omp_get_max_threads (void) __GOMP_NOTHROW;
+extern int omp_get_thread_num (void) __GOMP_NOTHROW;
+extern int omp_get_num_procs (void) __GOMP_NOTHROW;
+
+extern int omp_in_parallel (void) __GOMP_NOTHROW;
+
+extern void omp_set_dynamic (int) __GOMP_NOTHROW;
+extern int omp_get_dynamic (void) __GOMP_NOTHROW;
+
+extern void omp_set_nested (int) __GOMP_NOTHROW;
+extern int omp_get_nested (void) __GOMP_NOTHROW;
+
+extern void omp_init_lock (omp_lock_t *) __GOMP_NOTHROW;
+extern void omp_destroy_lock (omp_lock_t *) __GOMP_NOTHROW;
+extern void omp_set_lock (omp_lock_t *) __GOMP_NOTHROW;
+extern void omp_unset_lock (omp_lock_t *) __GOMP_NOTHROW;
+extern int omp_test_lock (omp_lock_t *) __GOMP_NOTHROW;
+
+extern void omp_init_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW;
+extern void omp_destroy_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW;
+extern void omp_set_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW;
+extern void omp_unset_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW;
+extern int omp_test_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW;
+
+extern double omp_get_wtime (void) __GOMP_NOTHROW;
+extern double omp_get_wtick (void) __GOMP_NOTHROW;
+
+extern void omp_set_schedule (omp_sched_t, int) __GOMP_NOTHROW;
+extern void omp_get_schedule (omp_sched_t *, int *) __GOMP_NOTHROW;
+extern int omp_get_thread_limit (void) __GOMP_NOTHROW;
+extern void omp_set_max_active_levels (int) __GOMP_NOTHROW;
+extern int omp_get_max_active_levels (void) __GOMP_NOTHROW;
+extern int omp_get_level (void) __GOMP_NOTHROW;
+extern int omp_get_ancestor_thread_num (int) __GOMP_NOTHROW;
+extern int omp_get_team_size (int) __GOMP_NOTHROW;
+extern int omp_get_active_level (void) __GOMP_NOTHROW;
+
+extern int omp_in_final (void) __GOMP_NOTHROW;
+
+extern int omp_get_cancellation (void) __GOMP_NOTHROW;
+extern omp_proc_bind_t omp_get_proc_bind (void) __GOMP_NOTHROW;
+
+extern void omp_set_default_device (int) __GOMP_NOTHROW;
+extern int omp_get_default_device (void) __GOMP_NOTHROW;
+extern int omp_get_num_devices (void) __GOMP_NOTHROW;
+extern int omp_get_num_teams (void) __GOMP_NOTHROW;
+extern int omp_get_team_num (void) __GOMP_NOTHROW;
+
+extern int omp_is_initial_device (void) __GOMP_NOTHROW;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OMP_H */
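For reference, a minimal usage sketch of the omp.h added above (not part of the committed file). It pairs with the libgomp.a and libgomp.spec files elsewhere in this change and is built with -fopenmp.

#include <omp.h>
#include <stdio.h>

int main (void)
{
  double t0 = omp_get_wtime ();

#pragma omp parallel
  {
    /* Each thread reports its id; the team size comes from the runtime.  */
    printf ("thread %d of %d\n",
            omp_get_thread_num (), omp_get_num_threads ());
  }

  printf ("elapsed: %f s\n", omp_get_wtime () - t0);
  return 0;
}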
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdalign.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdalign.h
new file mode 100644
index 0000000..ee2d81f
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdalign.h
@@ -0,0 +1,39 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C1X: 7.15 Alignment <stdalign.h>. */
+
+#ifndef _STDALIGN_H
+#define _STDALIGN_H
+
+#ifndef __cplusplus
+
+#define alignas _Alignas
+#define alignof _Alignof
+
+#define __alignas_is_defined 1
+#define __alignof_is_defined 1
+
+#endif
+
+#endif /* stdalign.h */
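For reference, a minimal usage sketch of the stdalign.h added above (not part of the committed file): in C11 mode the two macros simply forward to the _Alignas and _Alignof keywords. Build with -std=c11.

#include <stdalign.h>
#include <stdio.h>

int main (void)
{
  /* Request 16-byte alignment for a plain char buffer.  */
  alignas (16) char buf[64];

  printf ("alignof(double) = %zu, buf at %p\n",
          alignof (double), (void *) buf);
  return 0;
}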
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdarg.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdarg.h
new file mode 100644
index 0000000..1d4418b
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdarg.h
@@ -0,0 +1,126 @@
+/* Copyright (C) 1989-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.15 Variable arguments <stdarg.h>
+ */
+
+#ifndef _STDARG_H
+#ifndef _ANSI_STDARG_H_
+#ifndef __need___va_list
+#define _STDARG_H
+#define _ANSI_STDARG_H_
+#endif /* not __need___va_list */
+#undef __need___va_list
+
+/* Define __gnuc_va_list. */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef __builtin_va_list __gnuc_va_list;
+#endif
+
+/* Define the standard macros for the user,
+ if this invocation was from the user program. */
+#ifdef _STDARG_H
+
+#define va_start(v,l) __builtin_va_start(v,l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v,l) __builtin_va_arg(v,l)
+#if !defined(__STRICT_ANSI__) || __STDC_VERSION__ + 0 >= 199900L || defined(__GXX_EXPERIMENTAL_CXX0X__)
+#define va_copy(d,s) __builtin_va_copy(d,s)
+#endif
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+/* Define va_list, if desired, from __gnuc_va_list. */
+/* We deliberately do not define va_list when called from
+ stdio.h, because ANSI C says that stdio.h is not supposed to define
+ va_list. stdio.h needs to have access to that data type,
+ but must not use that name. It should use the name __gnuc_va_list,
+ which is safe because it is reserved for the implementation. */
+
+#ifdef _BSD_VA_LIST
+#undef _BSD_VA_LIST
+#endif
+
+#if defined(__svr4__) || (defined(_SCO_DS) && !defined(__VA_LIST))
+/* SVR4.2 uses _VA_LIST for an internal alias for va_list,
+ so we must avoid testing it and setting it here.
+ SVR4 uses _VA_LIST as a flag in stdarg.h, but we should
+ have no conflict with that. */
+#ifndef _VA_LIST_
+#define _VA_LIST_
+#ifdef __i860__
+#ifndef _VA_LIST
+#define _VA_LIST va_list
+#endif
+#endif /* __i860__ */
+typedef __gnuc_va_list va_list;
+#ifdef _SCO_DS
+#define __VA_LIST
+#endif
+#endif /* _VA_LIST_ */
+#else /* not __svr4__ || _SCO_DS */
+
+/* The macro _VA_LIST_ is the same thing used by this file in Ultrix.
+ But on BSD NET2 we must not test or define or undef it.
+ (Note that the comments in NET 2's ansi.h
+ are incorrect for _VA_LIST_--see stdio.h!) */
+#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT)
+/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */
+#ifndef _VA_LIST_DEFINED
+/* The macro _VA_LIST is used in SCO Unix 3.2. */
+#ifndef _VA_LIST
+/* The macro _VA_LIST_T_H is used in the Bull dpx2 */
+#ifndef _VA_LIST_T_H
+/* The macro __va_list__ is used by BeOS. */
+#ifndef __va_list__
+typedef __gnuc_va_list va_list;
+#endif /* not __va_list__ */
+#endif /* not _VA_LIST_T_H */
+#endif /* not _VA_LIST */
+#endif /* not _VA_LIST_DEFINED */
+#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__))
+#define _VA_LIST_
+#endif
+#ifndef _VA_LIST
+#define _VA_LIST
+#endif
+#ifndef _VA_LIST_DEFINED
+#define _VA_LIST_DEFINED
+#endif
+#ifndef _VA_LIST_T_H
+#define _VA_LIST_T_H
+#endif
+#ifndef __va_list__
+#define __va_list__
+#endif
+
+#endif /* not _VA_LIST_, except on certain systems */
+
+#endif /* not __svr4__ */
+
+#endif /* _STDARG_H */
+
+#endif /* not _ANSI_STDARG_H_ */
+#endif /* not _STDARG_H */
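[Editor's note] Every macro in the stdarg.h above forwards to a __builtin_va_* form, so a variadic function looks the same as with any other C library. A small hypothetical sketch that compiles as ordinary C:

#include <stdarg.h>
#include <stdio.h>

static int sum (int count, ...)
{
  va_list ap;
  int i, total = 0;
  va_start (ap, count);            /* expands to __builtin_va_start */
  for (i = 0; i < count; i++)
    total += va_arg (ap, int);     /* expands to __builtin_va_arg */
  va_end (ap);                     /* expands to __builtin_va_end */
  return total;
}

int main (void)
{
  printf ("%d\n", sum (3, 1, 2, 3));   /* prints 6 */
  return 0;
}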
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdatomic.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdatomic.h
new file mode 100644
index 0000000..108259b
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdatomic.h
@@ -0,0 +1,252 @@
+/* Copyright (C) 2013-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C11 Standard: 7.17 Atomics <stdatomic.h>. */
+
+#ifndef _STDATOMIC_H
+#define _STDATOMIC_H
+
+typedef enum
+ {
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+ } memory_order;
+
+
+typedef _Atomic _Bool atomic_bool;
+typedef _Atomic char atomic_char;
+typedef _Atomic signed char atomic_schar;
+typedef _Atomic unsigned char atomic_uchar;
+typedef _Atomic short atomic_short;
+typedef _Atomic unsigned short atomic_ushort;
+typedef _Atomic int atomic_int;
+typedef _Atomic unsigned int atomic_uint;
+typedef _Atomic long atomic_long;
+typedef _Atomic unsigned long atomic_ulong;
+typedef _Atomic long long atomic_llong;
+typedef _Atomic unsigned long long atomic_ullong;
+typedef _Atomic __CHAR16_TYPE__ atomic_char16_t;
+typedef _Atomic __CHAR32_TYPE__ atomic_char32_t;
+typedef _Atomic __WCHAR_TYPE__ atomic_wchar_t;
+typedef _Atomic __INT_LEAST8_TYPE__ atomic_int_least8_t;
+typedef _Atomic __UINT_LEAST8_TYPE__ atomic_uint_least8_t;
+typedef _Atomic __INT_LEAST16_TYPE__ atomic_int_least16_t;
+typedef _Atomic __UINT_LEAST16_TYPE__ atomic_uint_least16_t;
+typedef _Atomic __INT_LEAST32_TYPE__ atomic_int_least32_t;
+typedef _Atomic __UINT_LEAST32_TYPE__ atomic_uint_least32_t;
+typedef _Atomic __INT_LEAST64_TYPE__ atomic_int_least64_t;
+typedef _Atomic __UINT_LEAST64_TYPE__ atomic_uint_least64_t;
+typedef _Atomic __INT_FAST8_TYPE__ atomic_int_fast8_t;
+typedef _Atomic __UINT_FAST8_TYPE__ atomic_uint_fast8_t;
+typedef _Atomic __INT_FAST16_TYPE__ atomic_int_fast16_t;
+typedef _Atomic __UINT_FAST16_TYPE__ atomic_uint_fast16_t;
+typedef _Atomic __INT_FAST32_TYPE__ atomic_int_fast32_t;
+typedef _Atomic __UINT_FAST32_TYPE__ atomic_uint_fast32_t;
+typedef _Atomic __INT_FAST64_TYPE__ atomic_int_fast64_t;
+typedef _Atomic __UINT_FAST64_TYPE__ atomic_uint_fast64_t;
+typedef _Atomic __INTPTR_TYPE__ atomic_intptr_t;
+typedef _Atomic __UINTPTR_TYPE__ atomic_uintptr_t;
+typedef _Atomic __SIZE_TYPE__ atomic_size_t;
+typedef _Atomic __PTRDIFF_TYPE__ atomic_ptrdiff_t;
+typedef _Atomic __INTMAX_TYPE__ atomic_intmax_t;
+typedef _Atomic __UINTMAX_TYPE__ atomic_uintmax_t;
+
+
+#define ATOMIC_VAR_INIT(VALUE) (VALUE)
+#define atomic_init(PTR, VAL) \
+ do \
+ { \
+ *(PTR) = (VAL); \
+ } \
+ while (0)
+
+#define kill_dependency(Y) \
+ __extension__ \
+ ({ \
+ __auto_type __kill_dependency_tmp = (Y); \
+ __kill_dependency_tmp; \
+ })
+
+#define atomic_thread_fence(MO) __atomic_thread_fence (MO)
+#define atomic_signal_fence(MO) __atomic_signal_fence (MO)
+#define atomic_is_lock_free(OBJ) __atomic_is_lock_free (sizeof (*(OBJ)), (OBJ))
+
+#define __atomic_type_lock_free(T) \
+ (__atomic_always_lock_free (sizeof (T), (void *) 0) \
+ ? 2 \
+ : (__atomic_is_lock_free (sizeof (T), (void *) 0) ? 1 : 0))
+#define ATOMIC_BOOL_LOCK_FREE \
+ __atomic_type_lock_free (atomic_bool)
+#define ATOMIC_CHAR_LOCK_FREE \
+ __atomic_type_lock_free (atomic_char)
+#define ATOMIC_CHAR16_T_LOCK_FREE \
+ __atomic_type_lock_free (atomic_char16_t)
+#define ATOMIC_CHAR32_T_LOCK_FREE \
+ __atomic_type_lock_free (atomic_char32_t)
+#define ATOMIC_WCHAR_T_LOCK_FREE \
+ __atomic_type_lock_free (atomic_wchar_t)
+#define ATOMIC_SHORT_LOCK_FREE \
+ __atomic_type_lock_free (atomic_short)
+#define ATOMIC_INT_LOCK_FREE \
+ __atomic_type_lock_free (atomic_int)
+#define ATOMIC_LONG_LOCK_FREE \
+ __atomic_type_lock_free (atomic_long)
+#define ATOMIC_LLONG_LOCK_FREE \
+ __atomic_type_lock_free (atomic_llong)
+#define ATOMIC_POINTER_LOCK_FREE \
+ __atomic_type_lock_free (void * _Atomic)
+
+
+/* Note that these macros require __typeof__ and __auto_type to remove
+ _Atomic qualifiers (and const qualifiers, if those are valid on
+ macro operands).
+
+ Also note that the header file uses the generic form of __atomic
+ builtins, which requires the address to be taken of the value
+ parameter, and then we pass that value on. This allows the macros
+ to work for any type, and the compiler is smart enough to convert
+ these to lock-free _N variants if possible, and throw away the
+ temps. */
+
+#define atomic_store_explicit(PTR, VAL, MO) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_store_ptr = (PTR); \
+ __typeof__ (*__atomic_store_ptr) __atomic_store_tmp = (VAL); \
+ __atomic_store (__atomic_store_ptr, &__atomic_store_tmp, (MO)); \
+ })
+
+#define atomic_store(PTR, VAL) \
+ atomic_store_explicit (PTR, VAL, __ATOMIC_SEQ_CST)
+
+
+#define atomic_load_explicit(PTR, MO) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_load_ptr = (PTR); \
+ __typeof__ (*__atomic_load_ptr) __atomic_load_tmp; \
+ __atomic_load (__atomic_load_ptr, &__atomic_load_tmp, (MO)); \
+ __atomic_load_tmp; \
+ })
+
+#define atomic_load(PTR) atomic_load_explicit (PTR, __ATOMIC_SEQ_CST)
+
+
+#define atomic_exchange_explicit(PTR, VAL, MO) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_exchange_ptr = (PTR); \
+ __typeof__ (*__atomic_exchange_ptr) __atomic_exchange_val = (VAL); \
+ __typeof__ (*__atomic_exchange_ptr) __atomic_exchange_tmp; \
+ __atomic_exchange (__atomic_exchange_ptr, &__atomic_exchange_val, \
+ &__atomic_exchange_tmp, (MO)); \
+ __atomic_exchange_tmp; \
+ })
+
+#define atomic_exchange(PTR, VAL) \
+ atomic_exchange_explicit (PTR, VAL, __ATOMIC_SEQ_CST)
+
+
+#define atomic_compare_exchange_strong_explicit(PTR, VAL, DES, SUC, FAIL) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_compare_exchange_ptr = (PTR); \
+ __typeof__ (*__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \
+ = (DES); \
+ __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \
+ &__atomic_compare_exchange_tmp, 0, \
+ (SUC), (FAIL)); \
+ })
+
+#define atomic_compare_exchange_strong(PTR, VAL, DES) \
+ atomic_compare_exchange_strong_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \
+ __ATOMIC_SEQ_CST)
+
+#define atomic_compare_exchange_weak_explicit(PTR, VAL, DES, SUC, FAIL) \
+ __extension__ \
+ ({ \
+ __auto_type __atomic_compare_exchange_ptr = (PTR); \
+ __typeof__ (*__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \
+ = (DES); \
+ __atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \
+ &__atomic_compare_exchange_tmp, 1, \
+ (SUC), (FAIL)); \
+ })
+
+#define atomic_compare_exchange_weak(PTR, VAL, DES) \
+ atomic_compare_exchange_weak_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \
+ __ATOMIC_SEQ_CST)
+
+
+
+#define atomic_fetch_add(PTR, VAL) __atomic_fetch_add ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_add_explicit(PTR, VAL, MO) \
+ __atomic_fetch_add ((PTR), (VAL), (MO))
+
+#define atomic_fetch_sub(PTR, VAL) __atomic_fetch_sub ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_sub_explicit(PTR, VAL, MO) \
+ __atomic_fetch_sub ((PTR), (VAL), (MO))
+
+#define atomic_fetch_or(PTR, VAL) __atomic_fetch_or ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_or_explicit(PTR, VAL, MO) \
+ __atomic_fetch_or ((PTR), (VAL), (MO))
+
+#define atomic_fetch_xor(PTR, VAL) __atomic_fetch_xor ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_xor_explicit(PTR, VAL, MO) \
+ __atomic_fetch_xor ((PTR), (VAL), (MO))
+
+#define atomic_fetch_and(PTR, VAL) __atomic_fetch_and ((PTR), (VAL), \
+ __ATOMIC_SEQ_CST)
+#define atomic_fetch_and_explicit(PTR, VAL, MO) \
+ __atomic_fetch_and ((PTR), (VAL), (MO))
+
+
+typedef _Atomic struct
+{
+#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
+ _Bool __val;
+#else
+ unsigned char __val;
+#endif
+} atomic_flag;
+
+#define ATOMIC_FLAG_INIT { 0 }
+
+
+#define atomic_flag_test_and_set(PTR) \
+ __atomic_test_and_set ((PTR), __ATOMIC_SEQ_CST)
+#define atomic_flag_test_and_set_explicit(PTR, MO) \
+ __atomic_test_and_set ((PTR), (MO))
+
+#define atomic_flag_clear(PTR) __atomic_clear ((PTR), __ATOMIC_SEQ_CST)
+#define atomic_flag_clear_explicit(PTR, MO) __atomic_clear ((PTR), (MO))
+
+#endif /* _STDATOMIC_H */
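[Editor's note] The generic macros above all lower to the __atomic_* builtins, so any object type works and the compiler picks a lock-free form when it can. A minimal sketch, assuming -std=c11 so _Atomic is available:

#include <stdatomic.h>
#include <stdio.h>

int main (void)
{
  atomic_int counter = ATOMIC_VAR_INIT (0);
  int expected = 3;
  atomic_flag lock = ATOMIC_FLAG_INIT;

  atomic_fetch_add (&counter, 5);                                 /* seq_cst RMW */
  atomic_fetch_sub_explicit (&counter, 2, memory_order_relaxed);

  /* Strong compare-exchange; succeeds here because counter == 3.  */
  if (atomic_compare_exchange_strong (&counter, &expected, 10))
    printf ("counter=%d\n", atomic_load (&counter));              /* 10 */

  if (!atomic_flag_test_and_set (&lock))   /* acquire a trivial spin flag */
    atomic_flag_clear (&lock);             /* release it again            */
  return 0;
}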
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdbool.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdbool.h
new file mode 100644
index 0000000..f4e802f
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdbool.h
@@ -0,0 +1,50 @@
+/* Copyright (C) 1998-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.16 Boolean type and values <stdbool.h>
+ */
+
+#ifndef _STDBOOL_H
+#define _STDBOOL_H
+
+#ifndef __cplusplus
+
+#define bool _Bool
+#define true 1
+#define false 0
+
+#else /* __cplusplus */
+
+/* Supporting <stdbool.h> in C++ is a GCC extension. */
+#define _Bool bool
+#define bool bool
+#define false false
+#define true true
+
+#endif /* __cplusplus */
+
+/* Signal that all the definitions are present. */
+#define __bool_true_false_are_defined 1
+
+#endif /* stdbool.h */
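[Editor's note] A short sketch of the C side of the stdbool.h above (in C++ the header leaves the built-in bool alone and only supplies the feature macro):

#include <stdbool.h>
#include <stdio.h>

static bool is_even (int n) { return n % 2 == 0; }

int main (void)
{
  printf ("%d %d\n", is_even (4), __bool_true_false_are_defined);   /* 1 1 */
  return 0;
}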
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stddef.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stddef.h
new file mode 100644
index 0000000..cfa8df3
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stddef.h
@@ -0,0 +1,439 @@
+/* Copyright (C) 1989-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.17 Common definitions <stddef.h>
+ */
+#if (!defined(_STDDEF_H) && !defined(_STDDEF_H_) && !defined(_ANSI_STDDEF_H) \
+ && !defined(__STDDEF_H__)) \
+ || defined(__need_wchar_t) || defined(__need_size_t) \
+ || defined(__need_ptrdiff_t) || defined(__need_NULL) \
+ || defined(__need_wint_t)
+
+/* Any one of these symbols __need_* means that GNU libc
+ wants us just to define one data type. So don't define
+ the symbols that indicate this file's entire job has been done. */
+#if (!defined(__need_wchar_t) && !defined(__need_size_t) \
+ && !defined(__need_ptrdiff_t) && !defined(__need_NULL) \
+ && !defined(__need_wint_t))
+#define _STDDEF_H
+#define _STDDEF_H_
+/* snaroff@next.com says the NeXT needs this. */
+#define _ANSI_STDDEF_H
+#endif
+
+#ifndef __sys_stdtypes_h
+/* This avoids lossage on SunOS but only if stdtypes.h comes first.
+ There's no way to win with the other order! Sun lossage. */
+
+/* On 4.3bsd-net2, make sure ansi.h is included, so we have
+ one less case to deal with in the following. */
+#if defined (__BSD_NET2__) || defined (____386BSD____) || (defined (__FreeBSD__) && (__FreeBSD__ < 5)) || defined(__NetBSD__)
+#include <machine/ansi.h>
+#endif
+/* On FreeBSD 5, machine/ansi.h does not exist anymore... */
+#if defined (__FreeBSD__) && (__FreeBSD__ >= 5)
+#include <sys/_types.h>
+#endif
+
+/* In 4.3bsd-net2, machine/ansi.h defines these symbols, which are
+ defined if the corresponding type is *not* defined.
+ FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_.
+ NetBSD defines _I386_ANSI_H_ and _X86_64_ANSI_H_ instead of _ANSI_H_ */
+#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_)
+#if !defined(_SIZE_T_) && !defined(_BSD_SIZE_T_)
+#define _SIZE_T
+#endif
+#if !defined(_PTRDIFF_T_) && !defined(_BSD_PTRDIFF_T_)
+#define _PTRDIFF_T
+#endif
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_. */
+#if !defined(_WCHAR_T_) && !defined(_BSD_WCHAR_T_)
+#ifndef _BSD_WCHAR_T_
+#define _WCHAR_T
+#endif
+#endif
+/* Undef _FOO_T_ if we are supposed to define foo_t. */
+#if defined (__need_ptrdiff_t) || defined (_STDDEF_H_)
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#if defined (__need_size_t) || defined (_STDDEF_H_)
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#if defined (__need_wchar_t) || defined (_STDDEF_H_)
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_) */
+
+/* Sequent's header files use _PTRDIFF_T_ in some conflicting way.
+ Just ignore it. */
+#if defined (__sequent__) && defined (_PTRDIFF_T_)
+#undef _PTRDIFF_T_
+#endif
+
+/* On VxWorks, <type/vxTypesBase.h> may have defined macros like
+ _TYPE_size_t which will typedef size_t. fixincludes patched the
+ vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
+ not defined, and so that defining this macro defines _GCC_SIZE_T.
+ If we find that the macros are still defined at this point, we must
+ invoke them so that the type is defined as expected. */
+#if defined (_TYPE_ptrdiff_t) && (defined (__need_ptrdiff_t) || defined (_STDDEF_H_))
+_TYPE_ptrdiff_t;
+#undef _TYPE_ptrdiff_t
+#endif
+#if defined (_TYPE_size_t) && (defined (__need_size_t) || defined (_STDDEF_H_))
+_TYPE_size_t;
+#undef _TYPE_size_t
+#endif
+#if defined (_TYPE_wchar_t) && (defined (__need_wchar_t) || defined (_STDDEF_H_))
+_TYPE_wchar_t;
+#undef _TYPE_wchar_t
+#endif
+
+/* In case nobody has defined these types, but we aren't running under
+ GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
+ __WCHAR_TYPE__ have reasonable values. This can happen if parts
+ of GCC are compiled by an older compiler that actually includes
+ gstddef.h, such as collect2. */
+
+/* Signed type of difference of two pointers. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_ptrdiff_t)
+#ifndef _PTRDIFF_T /* in case <sys/types.h> has defined it. */
+#ifndef _T_PTRDIFF_
+#ifndef _T_PTRDIFF
+#ifndef __PTRDIFF_T
+#ifndef _PTRDIFF_T_
+#ifndef _BSD_PTRDIFF_T_
+#ifndef ___int_ptrdiff_t_h
+#ifndef _GCC_PTRDIFF_T
+#define _PTRDIFF_T
+#define _T_PTRDIFF_
+#define _T_PTRDIFF
+#define __PTRDIFF_T
+#define _PTRDIFF_T_
+#define _BSD_PTRDIFF_T_
+#define ___int_ptrdiff_t_h
+#define _GCC_PTRDIFF_T
+#ifndef __PTRDIFF_TYPE__
+#define __PTRDIFF_TYPE__ long int
+#endif
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#endif /* _GCC_PTRDIFF_T */
+#endif /* ___int_ptrdiff_t_h */
+#endif /* _BSD_PTRDIFF_T_ */
+#endif /* _PTRDIFF_T_ */
+#endif /* __PTRDIFF_T */
+#endif /* _T_PTRDIFF */
+#endif /* _T_PTRDIFF_ */
+#endif /* _PTRDIFF_T */
+
+/* If this symbol has done its job, get rid of it. */
+#undef __need_ptrdiff_t
+
+#endif /* _STDDEF_H or __need_ptrdiff_t. */
+
+/* Unsigned type of `sizeof' something. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_size_t)
+#ifndef __size_t__ /* BeOS */
+#ifndef __SIZE_T__ /* Cray Unicos/Mk */
+#ifndef _SIZE_T /* in case <sys/types.h> has defined it. */
+#ifndef _SYS_SIZE_T_H
+#ifndef _T_SIZE_
+#ifndef _T_SIZE
+#ifndef __SIZE_T
+#ifndef _SIZE_T_
+#ifndef _BSD_SIZE_T_
+#ifndef _SIZE_T_DEFINED_
+#ifndef _SIZE_T_DEFINED
+#ifndef _BSD_SIZE_T_DEFINED_ /* Darwin */
+#ifndef _SIZE_T_DECLARED /* FreeBSD 5 */
+#ifndef ___int_size_t_h
+#ifndef _GCC_SIZE_T
+#ifndef _SIZET_
+#ifndef __size_t
+#define __size_t__ /* BeOS */
+#define __SIZE_T__ /* Cray Unicos/Mk */
+#define _SIZE_T
+#define _SYS_SIZE_T_H
+#define _T_SIZE_
+#define _T_SIZE
+#define __SIZE_T
+#define _SIZE_T_
+#define _BSD_SIZE_T_
+#define _SIZE_T_DEFINED_
+#define _SIZE_T_DEFINED
+#define _BSD_SIZE_T_DEFINED_ /* Darwin */
+#define _SIZE_T_DECLARED /* FreeBSD 5 */
+#define ___int_size_t_h
+#define _GCC_SIZE_T
+#define _SIZET_
+#if (defined (__FreeBSD__) && (__FreeBSD__ >= 5)) \
+ || defined(__FreeBSD_kernel__)
+/* __size_t is a typedef on FreeBSD 5, must not trash it. */
+#elif defined (__VMS__)
+/* __size_t is also a typedef on VMS. */
+#else
+#define __size_t
+#endif
+#ifndef __SIZE_TYPE__
+#define __SIZE_TYPE__ long unsigned int
+#endif
+#if !(defined (__GNUG__) && defined (size_t))
+typedef __SIZE_TYPE__ size_t;
+#ifdef __BEOS__
+typedef long ssize_t;
+#endif /* __BEOS__ */
+#endif /* !(defined (__GNUG__) && defined (size_t)) */
+#endif /* __size_t */
+#endif /* _SIZET_ */
+#endif /* _GCC_SIZE_T */
+#endif /* ___int_size_t_h */
+#endif /* _SIZE_T_DECLARED */
+#endif /* _BSD_SIZE_T_DEFINED_ */
+#endif /* _SIZE_T_DEFINED */
+#endif /* _SIZE_T_DEFINED_ */
+#endif /* _BSD_SIZE_T_ */
+#endif /* _SIZE_T_ */
+#endif /* __SIZE_T */
+#endif /* _T_SIZE */
+#endif /* _T_SIZE_ */
+#endif /* _SYS_SIZE_T_H */
+#endif /* _SIZE_T */
+#endif /* __SIZE_T__ */
+#endif /* __size_t__ */
+#undef __need_size_t
+#endif /* _STDDEF_H or __need_size_t. */
+
+
+/* Wide character type.
+ Locale-writers should change this as necessary to
+ be big enough to hold unique values not between 0 and 127,
+ and not (wchar_t) -1, for each defined multibyte character. */
+
+/* Define this type if we are doing the whole job,
+ or if we want this type in particular. */
+#if defined (_STDDEF_H) || defined (__need_wchar_t)
+#ifndef __wchar_t__ /* BeOS */
+#ifndef __WCHAR_T__ /* Cray Unicos/Mk */
+#ifndef _WCHAR_T
+#ifndef _T_WCHAR_
+#ifndef _T_WCHAR
+#ifndef __WCHAR_T
+#ifndef _WCHAR_T_
+#ifndef _BSD_WCHAR_T_
+#ifndef _BSD_WCHAR_T_DEFINED_ /* Darwin */
+#ifndef _BSD_RUNE_T_DEFINED_ /* Darwin */
+#ifndef _WCHAR_T_DECLARED /* FreeBSD 5 */
+#ifndef _WCHAR_T_DEFINED_
+#ifndef _WCHAR_T_DEFINED
+#ifndef _WCHAR_T_H
+#ifndef ___int_wchar_t_h
+#ifndef __INT_WCHAR_T_H
+#ifndef _GCC_WCHAR_T
+#define __wchar_t__ /* BeOS */
+#define __WCHAR_T__ /* Cray Unicos/Mk */
+#define _WCHAR_T
+#define _T_WCHAR_
+#define _T_WCHAR
+#define __WCHAR_T
+#define _WCHAR_T_
+#define _BSD_WCHAR_T_
+#define _WCHAR_T_DEFINED_
+#define _WCHAR_T_DEFINED
+#define _WCHAR_T_H
+#define ___int_wchar_t_h
+#define __INT_WCHAR_T_H
+#define _GCC_WCHAR_T
+#define _WCHAR_T_DECLARED
+
+/* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_
+ instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other
+ symbols in the _FOO_T_ family, stays defined even after its
+ corresponding type is defined). If we define wchar_t, then we
+ must undef _WCHAR_T_; for BSD/386 1.1 (and perhaps others), if
+ we undef _WCHAR_T_, then we must also define rune_t, since
+ headers like runetype.h assume that if machine/ansi.h is included,
+ and _BSD_WCHAR_T_ is not defined, then rune_t is available.
+ machine/ansi.h says, "Note that _WCHAR_T_ and _RUNE_T_ must be of
+ the same type." */
+#ifdef _BSD_WCHAR_T_
+#undef _BSD_WCHAR_T_
+#ifdef _BSD_RUNE_T_
+#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE)
+typedef _BSD_RUNE_T_ rune_t;
+#define _BSD_WCHAR_T_DEFINED_
+#define _BSD_RUNE_T_DEFINED_ /* Darwin */
+#if defined (__FreeBSD__) && (__FreeBSD__ < 5)
+/* Why is this file so hard to maintain properly? In contrast to
+ the comment above regarding BSD/386 1.1, on FreeBSD for as long
+ as the symbol has existed, _BSD_RUNE_T_ must not stay defined or
+ redundant typedefs will occur when stdlib.h is included after this file. */
+#undef _BSD_RUNE_T_
+#endif
+#endif
+#endif
+#endif
+/* FreeBSD 5 can't be handled well using "traditional" logic above
+ since it no longer defines _BSD_RUNE_T_ yet still desires to export
+ rune_t in some cases... */
+#if defined (__FreeBSD__) && (__FreeBSD__ >= 5)
+#if !defined (_ANSI_SOURCE) && !defined (_POSIX_SOURCE)
+#if __BSD_VISIBLE
+#ifndef _RUNE_T_DECLARED
+typedef __rune_t rune_t;
+#define _RUNE_T_DECLARED
+#endif
+#endif
+#endif
+#endif
+
+#ifndef __WCHAR_TYPE__
+#define __WCHAR_TYPE__ int
+#endif
+#ifndef __cplusplus
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif /* _WCHAR_T_DECLARED */
+#endif /* _BSD_RUNE_T_DEFINED_ */
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif /* __WCHAR_T__ */
+#endif /* __wchar_t__ */
+#undef __need_wchar_t
+#endif /* _STDDEF_H or __need_wchar_t. */
+
+#if defined (__need_wint_t)
+#ifndef _WINT_T
+#define _WINT_T
+
+#ifndef __WINT_TYPE__
+#define __WINT_TYPE__ unsigned int
+#endif
+typedef __WINT_TYPE__ wint_t;
+#endif
+#undef __need_wint_t
+#endif
+
+/* In 4.3bsd-net2, leave these undefined to indicate that size_t, etc.
+ are already defined. */
+/* BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here. */
+/* NetBSD 5 requires the I386_ANSI_H and X86_64_ANSI_H checks here. */
+#if defined(_ANSI_H_) || defined(_MACHINE_ANSI_H_) || defined(_X86_64_ANSI_H_) || defined(_I386_ANSI_H_)
+/* The references to _GCC_PTRDIFF_T_, _GCC_SIZE_T_, and _GCC_WCHAR_T_
+ are probably typos and should be removed before 2.8 is released. */
+#ifdef _GCC_PTRDIFF_T_
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T_
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T_
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+/* The following ones are the real ones. */
+#ifdef _GCC_PTRDIFF_T
+#undef _PTRDIFF_T_
+#undef _BSD_PTRDIFF_T_
+#endif
+#ifdef _GCC_SIZE_T
+#undef _SIZE_T_
+#undef _BSD_SIZE_T_
+#endif
+#ifdef _GCC_WCHAR_T
+#undef _WCHAR_T_
+#undef _BSD_WCHAR_T_
+#endif
+#endif /* _ANSI_H_ || _MACHINE_ANSI_H_ || _X86_64_ANSI_H_ || _I386_ANSI_H_ */
+
+#endif /* __sys_stdtypes_h */
+
+/* A null pointer constant. */
+
+#if defined (_STDDEF_H) || defined (__need_NULL)
+#undef NULL /* in case <stdio.h> has defined it. */
+#ifdef __GNUG__
+#define NULL __null
+#else /* G++ */
+#ifndef __cplusplus
+#define NULL ((void *)0)
+#else /* C++ */
+#define NULL 0
+#endif /* C++ */
+#endif /* G++ */
+#endif /* NULL not defined and <stddef.h> or need NULL. */
+#undef __need_NULL
+
+#ifdef _STDDEF_H
+
+/* Offset of member MEMBER in a struct of type TYPE. */
+#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
+
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \
+ || (defined(__cplusplus) && __cplusplus >= 201103L)
+#ifndef _GCC_MAX_ALIGN_T
+#define _GCC_MAX_ALIGN_T
+/* Type whose alignment is supported in every context and is at least
+ as great as that of any standard type not using alignment
+ specifiers. */
+typedef struct {
+ long long __max_align_ll __attribute__((__aligned__(__alignof__(long long))));
+ long double __max_align_ld __attribute__((__aligned__(__alignof__(long double))));
+} max_align_t;
+#endif
+#endif /* C11 or C++11. */
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#ifndef _GXX_NULLPTR_T
+#define _GXX_NULLPTR_T
+ typedef decltype(nullptr) nullptr_t;
+#endif
+#endif /* C++11. */
+
+#endif /* _STDDEF_H was defined this time */
+
+#endif /* !_STDDEF_H && !_STDDEF_H_ && !_ANSI_STDDEF_H && !__STDDEF_H__
+ || __need_XXX was not defined before */
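[Editor's note] Despite the thicket of guard macros, what stddef.h exports is small: ptrdiff_t, size_t, wchar_t, NULL, offsetof and (in C11) max_align_t. A minimal sketch, assuming -std=c11 for max_align_t:

#include <stddef.h>
#include <stdio.h>

struct packet { char tag; int payload; };

int main (void)
{
  size_t off = offsetof (struct packet, payload);   /* __builtin_offsetof */
  int data[4] = { 0 };
  ptrdiff_t span = &data[3] - &data[0];             /* signed pointer difference */
  printf ("offset=%zu span=%td alignof(max_align_t)=%zu NULL=%p\n",
          off, span, _Alignof (max_align_t), (void *) NULL);
  return 0;
}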
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdfix.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdfix.h
new file mode 100644
index 0000000..93e759a
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdfix.h
@@ -0,0 +1,204 @@
+/* Copyright (C) 2007-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO/IEC JTC1 SC22 WG14 N1169
+ * Date: 2006-04-04
+ * ISO/IEC TR 18037
+ * Programming languages - C - Extensions to support embedded processors
+ */
+
+#ifndef _STDFIX_H
+#define _STDFIX_H
+
+/* 7.18a.1 Introduction. */
+
+#undef fract
+#undef accum
+#undef sat
+#define fract _Fract
+#define accum _Accum
+#define sat _Sat
+
+/* 7.18a.3 Precision macros. */
+
+#undef SFRACT_FBIT
+#undef SFRACT_MIN
+#undef SFRACT_MAX
+#undef SFRACT_EPSILON
+#define SFRACT_FBIT __SFRACT_FBIT__
+#define SFRACT_MIN __SFRACT_MIN__
+#define SFRACT_MAX __SFRACT_MAX__
+#define SFRACT_EPSILON __SFRACT_EPSILON__
+
+#undef USFRACT_FBIT
+#undef USFRACT_MIN
+#undef USFRACT_MAX
+#undef USFRACT_EPSILON
+#define USFRACT_FBIT __USFRACT_FBIT__
+#define USFRACT_MIN __USFRACT_MIN__ /* GCC extension. */
+#define USFRACT_MAX __USFRACT_MAX__
+#define USFRACT_EPSILON __USFRACT_EPSILON__
+
+#undef FRACT_FBIT
+#undef FRACT_MIN
+#undef FRACT_MAX
+#undef FRACT_EPSILON
+#define FRACT_FBIT __FRACT_FBIT__
+#define FRACT_MIN __FRACT_MIN__
+#define FRACT_MAX __FRACT_MAX__
+#define FRACT_EPSILON __FRACT_EPSILON__
+
+#undef UFRACT_FBIT
+#undef UFRACT_MIN
+#undef UFRACT_MAX
+#undef UFRACT_EPSILON
+#define UFRACT_FBIT __UFRACT_FBIT__
+#define UFRACT_MIN __UFRACT_MIN__ /* GCC extension. */
+#define UFRACT_MAX __UFRACT_MAX__
+#define UFRACT_EPSILON __UFRACT_EPSILON__
+
+#undef LFRACT_FBIT
+#undef LFRACT_MIN
+#undef LFRACT_MAX
+#undef LFRACT_EPSILON
+#define LFRACT_FBIT __LFRACT_FBIT__
+#define LFRACT_MIN __LFRACT_MIN__
+#define LFRACT_MAX __LFRACT_MAX__
+#define LFRACT_EPSILON __LFRACT_EPSILON__
+
+#undef ULFRACT_FBIT
+#undef ULFRACT_MIN
+#undef ULFRACT_MAX
+#undef ULFRACT_EPSILON
+#define ULFRACT_FBIT __ULFRACT_FBIT__
+#define ULFRACT_MIN __ULFRACT_MIN__ /* GCC extension. */
+#define ULFRACT_MAX __ULFRACT_MAX__
+#define ULFRACT_EPSILON __ULFRACT_EPSILON__
+
+#undef LLFRACT_FBIT
+#undef LLFRACT_MIN
+#undef LLFRACT_MAX
+#undef LLFRACT_EPSILON
+#define LLFRACT_FBIT __LLFRACT_FBIT__ /* GCC extension. */
+#define LLFRACT_MIN __LLFRACT_MIN__ /* GCC extension. */
+#define LLFRACT_MAX __LLFRACT_MAX__ /* GCC extension. */
+#define LLFRACT_EPSILON __LLFRACT_EPSILON__ /* GCC extension. */
+
+#undef ULLFRACT_FBIT
+#undef ULLFRACT_MIN
+#undef ULLFRACT_MAX
+#undef ULLFRACT_EPSILON
+#define ULLFRACT_FBIT __ULLFRACT_FBIT__ /* GCC extension. */
+#define ULLFRACT_MIN __ULLFRACT_MIN__ /* GCC extension. */
+#define ULLFRACT_MAX __ULLFRACT_MAX__ /* GCC extension. */
+#define ULLFRACT_EPSILON __ULLFRACT_EPSILON__ /* GCC extension. */
+
+#undef SACCUM_FBIT
+#undef SACCUM_IBIT
+#undef SACCUM_MIN
+#undef SACCUM_MAX
+#undef SACCUM_EPSILON
+#define SACCUM_FBIT __SACCUM_FBIT__
+#define SACCUM_IBIT __SACCUM_IBIT__
+#define SACCUM_MIN __SACCUM_MIN__
+#define SACCUM_MAX __SACCUM_MAX__
+#define SACCUM_EPSILON __SACCUM_EPSILON__
+
+#undef USACCUM_FBIT
+#undef USACCUM_IBIT
+#undef USACCUM_MIN
+#undef USACCUM_MAX
+#undef USACCUM_EPSILON
+#define USACCUM_FBIT __USACCUM_FBIT__
+#define USACCUM_IBIT __USACCUM_IBIT__
+#define USACCUM_MIN __USACCUM_MIN__ /* GCC extension. */
+#define USACCUM_MAX __USACCUM_MAX__
+#define USACCUM_EPSILON __USACCUM_EPSILON__
+
+#undef ACCUM_FBIT
+#undef ACCUM_IBIT
+#undef ACCUM_MIN
+#undef ACCUM_MAX
+#undef ACCUM_EPSILON
+#define ACCUM_FBIT __ACCUM_FBIT__
+#define ACCUM_IBIT __ACCUM_IBIT__
+#define ACCUM_MIN __ACCUM_MIN__
+#define ACCUM_MAX __ACCUM_MAX__
+#define ACCUM_EPSILON __ACCUM_EPSILON__
+
+#undef UACCUM_FBIT
+#undef UACCUM_IBIT
+#undef UACCUM_MIN
+#undef UACCUM_MAX
+#undef UACCUM_EPSILON
+#define UACCUM_FBIT __UACCUM_FBIT__
+#define UACCUM_IBIT __UACCUM_IBIT__
+#define UACCUM_MIN __UACCUM_MIN__ /* GCC extension. */
+#define UACCUM_MAX __UACCUM_MAX__
+#define UACCUM_EPSILON __UACCUM_EPSILON__
+
+#undef LACCUM_FBIT
+#undef LACCUM_IBIT
+#undef LACCUM_MIN
+#undef LACCUM_MAX
+#undef LACCUM_EPSILON
+#define LACCUM_FBIT __LACCUM_FBIT__
+#define LACCUM_IBIT __LACCUM_IBIT__
+#define LACCUM_MIN __LACCUM_MIN__
+#define LACCUM_MAX __LACCUM_MAX__
+#define LACCUM_EPSILON __LACCUM_EPSILON__
+
+#undef ULACCUM_FBIT
+#undef ULACCUM_IBIT
+#undef ULACCUM_MIN
+#undef ULACCUM_MAX
+#undef ULACCUM_EPSILON
+#define ULACCUM_FBIT __ULACCUM_FBIT__
+#define ULACCUM_IBIT __ULACCUM_IBIT__
+#define ULACCUM_MIN __ULACCUM_MIN__ /* GCC extension. */
+#define ULACCUM_MAX __ULACCUM_MAX__
+#define ULACCUM_EPSILON __ULACCUM_EPSILON__
+
+#undef LLACCUM_FBIT
+#undef LLACCUM_IBIT
+#undef LLACCUM_MIN
+#undef LLACCUM_MAX
+#undef LLACCUM_EPSILON
+#define LLACCUM_FBIT __LLACCUM_FBIT__ /* GCC extension. */
+#define LLACCUM_IBIT __LLACCUM_IBIT__ /* GCC extension. */
+#define LLACCUM_MIN __LLACCUM_MIN__ /* GCC extension. */
+#define LLACCUM_MAX __LLACCUM_MAX__ /* GCC extension. */
+#define LLACCUM_EPSILON __LLACCUM_EPSILON__ /* GCC extension. */
+
+#undef ULLACCUM_FBIT
+#undef ULLACCUM_IBIT
+#undef ULLACCUM_MIN
+#undef ULLACCUM_MAX
+#undef ULLACCUM_EPSILON
+#define ULLACCUM_FBIT __ULLACCUM_FBIT__ /* GCC extension. */
+#define ULLACCUM_IBIT __ULLACCUM_IBIT__ /* GCC extension. */
+#define ULLACCUM_MIN __ULLACCUM_MIN__ /* GCC extension. */
+#define ULLACCUM_MAX __ULLACCUM_MAX__ /* GCC extension. */
+#define ULLACCUM_EPSILON __ULLACCUM_EPSILON__ /* GCC extension. */
+
+#endif /* _STDFIX_H */
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint-gcc.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint-gcc.h
new file mode 100644
index 0000000..1470cea
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint-gcc.h
@@ -0,0 +1,263 @@
+/* Copyright (C) 2008-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * ISO C Standard: 7.18 Integer types <stdint.h>
+ */
+
+#ifndef _GCC_STDINT_H
+#define _GCC_STDINT_H
+
+/* 7.8.1.1 Exact-width integer types */
+
+#ifdef __INT8_TYPE__
+typedef __INT8_TYPE__ int8_t;
+#endif
+#ifdef __INT16_TYPE__
+typedef __INT16_TYPE__ int16_t;
+#endif
+#ifdef __INT32_TYPE__
+typedef __INT32_TYPE__ int32_t;
+#endif
+#ifdef __INT64_TYPE__
+typedef __INT64_TYPE__ int64_t;
+#endif
+#ifdef __UINT8_TYPE__
+typedef __UINT8_TYPE__ uint8_t;
+#endif
+#ifdef __UINT16_TYPE__
+typedef __UINT16_TYPE__ uint16_t;
+#endif
+#ifdef __UINT32_TYPE__
+typedef __UINT32_TYPE__ uint32_t;
+#endif
+#ifdef __UINT64_TYPE__
+typedef __UINT64_TYPE__ uint64_t;
+#endif
+
+/* 7.8.1.2 Minimum-width integer types */
+
+typedef __INT_LEAST8_TYPE__ int_least8_t;
+typedef __INT_LEAST16_TYPE__ int_least16_t;
+typedef __INT_LEAST32_TYPE__ int_least32_t;
+typedef __INT_LEAST64_TYPE__ int_least64_t;
+typedef __UINT_LEAST8_TYPE__ uint_least8_t;
+typedef __UINT_LEAST16_TYPE__ uint_least16_t;
+typedef __UINT_LEAST32_TYPE__ uint_least32_t;
+typedef __UINT_LEAST64_TYPE__ uint_least64_t;
+
+/* 7.8.1.3 Fastest minimum-width integer types */
+
+typedef __INT_FAST8_TYPE__ int_fast8_t;
+typedef __INT_FAST16_TYPE__ int_fast16_t;
+typedef __INT_FAST32_TYPE__ int_fast32_t;
+typedef __INT_FAST64_TYPE__ int_fast64_t;
+typedef __UINT_FAST8_TYPE__ uint_fast8_t;
+typedef __UINT_FAST16_TYPE__ uint_fast16_t;
+typedef __UINT_FAST32_TYPE__ uint_fast32_t;
+typedef __UINT_FAST64_TYPE__ uint_fast64_t;
+
+/* 7.8.1.4 Integer types capable of holding object pointers */
+
+#ifdef __INTPTR_TYPE__
+typedef __INTPTR_TYPE__ intptr_t;
+#endif
+#ifdef __UINTPTR_TYPE__
+typedef __UINTPTR_TYPE__ uintptr_t;
+#endif
+
+/* 7.8.1.5 Greatest-width integer types */
+
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+#if (!defined __cplusplus || __cplusplus >= 201103L \
+ || defined __STDC_LIMIT_MACROS)
+
+/* 7.18.2 Limits of specified-width integer types */
+
+#ifdef __INT8_MAX__
+# undef INT8_MAX
+# define INT8_MAX __INT8_MAX__
+# undef INT8_MIN
+# define INT8_MIN (-INT8_MAX - 1)
+#endif
+#ifdef __UINT8_MAX__
+# undef UINT8_MAX
+# define UINT8_MAX __UINT8_MAX__
+#endif
+#ifdef __INT16_MAX__
+# undef INT16_MAX
+# define INT16_MAX __INT16_MAX__
+# undef INT16_MIN
+# define INT16_MIN (-INT16_MAX - 1)
+#endif
+#ifdef __UINT16_MAX__
+# undef UINT16_MAX
+# define UINT16_MAX __UINT16_MAX__
+#endif
+#ifdef __INT32_MAX__
+# undef INT32_MAX
+# define INT32_MAX __INT32_MAX__
+# undef INT32_MIN
+# define INT32_MIN (-INT32_MAX - 1)
+#endif
+#ifdef __UINT32_MAX__
+# undef UINT32_MAX
+# define UINT32_MAX __UINT32_MAX__
+#endif
+#ifdef __INT64_MAX__
+# undef INT64_MAX
+# define INT64_MAX __INT64_MAX__
+# undef INT64_MIN
+# define INT64_MIN (-INT64_MAX - 1)
+#endif
+#ifdef __UINT64_MAX__
+# undef UINT64_MAX
+# define UINT64_MAX __UINT64_MAX__
+#endif
+
+#undef INT_LEAST8_MAX
+#define INT_LEAST8_MAX __INT_LEAST8_MAX__
+#undef INT_LEAST8_MIN
+#define INT_LEAST8_MIN (-INT_LEAST8_MAX - 1)
+#undef UINT_LEAST8_MAX
+#define UINT_LEAST8_MAX __UINT_LEAST8_MAX__
+#undef INT_LEAST16_MAX
+#define INT_LEAST16_MAX __INT_LEAST16_MAX__
+#undef INT_LEAST16_MIN
+#define INT_LEAST16_MIN (-INT_LEAST16_MAX - 1)
+#undef UINT_LEAST16_MAX
+#define UINT_LEAST16_MAX __UINT_LEAST16_MAX__
+#undef INT_LEAST32_MAX
+#define INT_LEAST32_MAX __INT_LEAST32_MAX__
+#undef INT_LEAST32_MIN
+#define INT_LEAST32_MIN (-INT_LEAST32_MAX - 1)
+#undef UINT_LEAST32_MAX
+#define UINT_LEAST32_MAX __UINT_LEAST32_MAX__
+#undef INT_LEAST64_MAX
+#define INT_LEAST64_MAX __INT_LEAST64_MAX__
+#undef INT_LEAST64_MIN
+#define INT_LEAST64_MIN (-INT_LEAST64_MAX - 1)
+#undef UINT_LEAST64_MAX
+#define UINT_LEAST64_MAX __UINT_LEAST64_MAX__
+
+#undef INT_FAST8_MAX
+#define INT_FAST8_MAX __INT_FAST8_MAX__
+#undef INT_FAST8_MIN
+#define INT_FAST8_MIN (-INT_FAST8_MAX - 1)
+#undef UINT_FAST8_MAX
+#define UINT_FAST8_MAX __UINT_FAST8_MAX__
+#undef INT_FAST16_MAX
+#define INT_FAST16_MAX __INT_FAST16_MAX__
+#undef INT_FAST16_MIN
+#define INT_FAST16_MIN (-INT_FAST16_MAX - 1)
+#undef UINT_FAST16_MAX
+#define UINT_FAST16_MAX __UINT_FAST16_MAX__
+#undef INT_FAST32_MAX
+#define INT_FAST32_MAX __INT_FAST32_MAX__
+#undef INT_FAST32_MIN
+#define INT_FAST32_MIN (-INT_FAST32_MAX - 1)
+#undef UINT_FAST32_MAX
+#define UINT_FAST32_MAX __UINT_FAST32_MAX__
+#undef INT_FAST64_MAX
+#define INT_FAST64_MAX __INT_FAST64_MAX__
+#undef INT_FAST64_MIN
+#define INT_FAST64_MIN (-INT_FAST64_MAX - 1)
+#undef UINT_FAST64_MAX
+#define UINT_FAST64_MAX __UINT_FAST64_MAX__
+
+#ifdef __INTPTR_MAX__
+# undef INTPTR_MAX
+# define INTPTR_MAX __INTPTR_MAX__
+# undef INTPTR_MIN
+# define INTPTR_MIN (-INTPTR_MAX - 1)
+#endif
+#ifdef __UINTPTR_MAX__
+# undef UINTPTR_MAX
+# define UINTPTR_MAX __UINTPTR_MAX__
+#endif
+
+#undef INTMAX_MAX
+#define INTMAX_MAX __INTMAX_MAX__
+#undef INTMAX_MIN
+#define INTMAX_MIN (-INTMAX_MAX - 1)
+#undef UINTMAX_MAX
+#define UINTMAX_MAX __UINTMAX_MAX__
+
+/* 7.18.3 Limits of other integer types */
+
+#undef PTRDIFF_MAX
+#define PTRDIFF_MAX __PTRDIFF_MAX__
+#undef PTRDIFF_MIN
+#define PTRDIFF_MIN (-PTRDIFF_MAX - 1)
+
+#undef SIG_ATOMIC_MAX
+#define SIG_ATOMIC_MAX __SIG_ATOMIC_MAX__
+#undef SIG_ATOMIC_MIN
+#define SIG_ATOMIC_MIN __SIG_ATOMIC_MIN__
+
+#undef SIZE_MAX
+#define SIZE_MAX __SIZE_MAX__
+
+#undef WCHAR_MAX
+#define WCHAR_MAX __WCHAR_MAX__
+#undef WCHAR_MIN
+#define WCHAR_MIN __WCHAR_MIN__
+
+#undef WINT_MAX
+#define WINT_MAX __WINT_MAX__
+#undef WINT_MIN
+#define WINT_MIN __WINT_MIN__
+
+#endif /* (!defined __cplusplus || __cplusplus >= 201103L
+ || defined __STDC_LIMIT_MACROS) */
+
+#if (!defined __cplusplus || __cplusplus >= 201103L \
+ || defined __STDC_CONSTANT_MACROS)
+
+#undef INT8_C
+#define INT8_C(c) __INT8_C(c)
+#undef INT16_C
+#define INT16_C(c) __INT16_C(c)
+#undef INT32_C
+#define INT32_C(c) __INT32_C(c)
+#undef INT64_C
+#define INT64_C(c) __INT64_C(c)
+#undef UINT8_C
+#define UINT8_C(c) __UINT8_C(c)
+#undef UINT16_C
+#define UINT16_C(c) __UINT16_C(c)
+#undef UINT32_C
+#define UINT32_C(c) __UINT32_C(c)
+#undef UINT64_C
+#define UINT64_C(c) __UINT64_C(c)
+#undef INTMAX_C
+#define INTMAX_C(c) __INTMAX_C(c)
+#undef UINTMAX_C
+#define UINTMAX_C(c) __UINTMAX_C(c)
+
+#endif /* (!defined __cplusplus || __cplusplus >= 201103L
+ || defined __STDC_CONSTANT_MACROS) */
+
+#endif /* _GCC_STDINT_H */
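[Editor's note] The limit and constant macros above mirror the compiler's predefined __*_MAX__/__*_C macros, so they behave the same whether the freestanding stdint-gcc.h or a hosted libc <stdint.h> ends up included. A small hypothetical sketch (format macros come from <inttypes.h>):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main (void)
{
  uint32_t mask = UINT32_C (0xFF000000);
  int64_t big = INT64_MAX - INT64_C (1);
  uintptr_t addr = (uintptr_t) &mask;   /* pointers round-trip through uintptr_t */
  printf ("mask=%" PRIu32 " big=%" PRId64 " addr=0x%" PRIxPTR "\n",
          mask, big, addr);
  return 0;
}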
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint.h
new file mode 100644
index 0000000..83b6f70
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdint.h
@@ -0,0 +1,14 @@
+#ifndef _GCC_WRAP_STDINT_H
+#if __STDC_HOSTED__
+# if defined __cplusplus && __cplusplus >= 201103L
+# undef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS
+# undef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS
+# endif
+# include_next <stdint.h>
+#else
+# include "stdint-gcc.h"
+#endif
+#define _GCC_WRAP_STDINT_H
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdnoreturn.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdnoreturn.h
new file mode 100644
index 0000000..0134137
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/stdnoreturn.h
@@ -0,0 +1,35 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* ISO C1X: 7.23 _Noreturn <stdnoreturn.h>. */
+
+#ifndef _STDNORETURN_H
+#define _STDNORETURN_H
+
+#ifndef __cplusplus
+
+#define noreturn _Noreturn
+
+#endif
+
+#endif /* stdnoreturn.h */
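[Editor's note] stdnoreturn.h above only provides the convenience spelling; the semantics come from the C11 _Noreturn specifier. A minimal sketch, assuming -std=c11:

#include <stdnoreturn.h>
#include <stdio.h>
#include <stdlib.h>

static noreturn void die (const char *msg)   /* noreturn expands to _Noreturn */
{
  fprintf (stderr, "fatal: %s\n", msg);
  exit (1);                                  /* never returns */
}

int main (void)
{
  if (0)
    die ("unreachable");
  return 0;
}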
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind-arm-common.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind-arm-common.h
new file mode 100644
index 0000000..65b50bc
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind-arm-common.h
@@ -0,0 +1,250 @@
+/* Header file for the ARM EABI and C6X unwinders
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Language-independent unwinder header public defines. This contains both
+ ABI defined objects, and GNU support routines. */
+
+#ifndef UNWIND_ARM_COMMON_H
+#define UNWIND_ARM_COMMON_H
+
+#define __ARM_EABI_UNWINDER__ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ typedef unsigned _Unwind_Word __attribute__((__mode__(__word__)));
+ typedef signed _Unwind_Sword __attribute__((__mode__(__word__)));
+ typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__)));
+ typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__)));
+ typedef _Unwind_Word _uw;
+ typedef unsigned _uw64 __attribute__((mode(__DI__)));
+ typedef unsigned _uw16 __attribute__((mode(__HI__)));
+ typedef unsigned _uw8 __attribute__((mode(__QI__)));
+
+ typedef enum
+ {
+ _URC_OK = 0, /* operation completed successfully */
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8,
+ _URC_FAILURE = 9 /* unspecified failure of some kind */
+ }
+ _Unwind_Reason_Code;
+
+ typedef enum
+ {
+ _US_VIRTUAL_UNWIND_FRAME = 0,
+ _US_UNWIND_FRAME_STARTING = 1,
+ _US_UNWIND_FRAME_RESUME = 2,
+ _US_ACTION_MASK = 3,
+ _US_FORCE_UNWIND = 8,
+ _US_END_OF_STACK = 16
+ }
+ _Unwind_State;
+
+ /* Provided only for compatibility with existing code. */
+ typedef int _Unwind_Action;
+#define _UA_SEARCH_PHASE 1
+#define _UA_CLEANUP_PHASE 2
+#define _UA_HANDLER_FRAME 4
+#define _UA_FORCE_UNWIND 8
+#define _UA_END_OF_STACK 16
+#define _URC_NO_REASON _URC_OK
+
+ typedef struct _Unwind_Control_Block _Unwind_Control_Block;
+ typedef struct _Unwind_Context _Unwind_Context;
+ typedef _uw _Unwind_EHT_Header;
+
+
+ /* UCB: */
+
+ struct _Unwind_Control_Block
+ {
+ char exception_class[8];
+ void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *);
+ /* Unwinder cache, private fields for the unwinder's use */
+ struct
+ {
+ _uw reserved1; /* Forced unwind stop fn, 0 if not forced */
+ _uw reserved2; /* Personality routine address */
+ _uw reserved3; /* Saved callsite address */
+ _uw reserved4; /* Forced unwind stop arg */
+ _uw reserved5;
+ }
+ unwinder_cache;
+ /* Propagation barrier cache (valid after phase 1): */
+ struct
+ {
+ _uw sp;
+ _uw bitpattern[5];
+ }
+ barrier_cache;
+ /* Cleanup cache (preserved over cleanup): */
+ struct
+ {
+ _uw bitpattern[4];
+ }
+ cleanup_cache;
+ /* Pr cache (for pr's benefit): */
+ struct
+ {
+ _uw fnstart; /* function start address */
+ _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */
+ _uw additional; /* additional data */
+ _uw reserved1;
+ }
+ pr_cache;
+ long long int :0; /* Force alignment to 8-byte boundary */
+ };
+
+  /* Virtual Register Set.  */
+
+ typedef enum
+ {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_FPA = 2, /* fpa */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+ }
+ _Unwind_VRS_RegClass;
+
+ typedef enum
+ {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_FPAX = 2,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+ }
+ _Unwind_VRS_DataRepresentation;
+
+ typedef enum
+ {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+ }
+ _Unwind_VRS_Result;
+
+ /* Frame unwinding state. */
+ typedef struct
+ {
+ /* The current word (bytes packed msb first). */
+ _uw data;
+ /* Pointer to the next word of data. */
+ _uw *next;
+ /* The number of bytes left in this word. */
+ _uw8 bytes_left;
+ /* The number of words pointed to by ptr. */
+ _uw8 words_left;
+ }
+ __gnu_unwind_state;
+
+ typedef _Unwind_Reason_Code (*personality_routine) (_Unwind_State,
+ _Unwind_Control_Block *, _Unwind_Context *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation,
+ void *);
+
+ _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *, _Unwind_VRS_RegClass,
+ _uw, _Unwind_VRS_DataRepresentation);
+
+
+ /* Support functions for the PR. */
+#define _Unwind_Exception _Unwind_Control_Block
+ typedef char _Unwind_Exception_Class[8];
+
+ void * _Unwind_GetLanguageSpecificData (_Unwind_Context *);
+ _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *);
+
+ _Unwind_Ptr _Unwind_GetDataRelBase (_Unwind_Context *);
+ /* This should never be used. */
+ _Unwind_Ptr _Unwind_GetTextRelBase (_Unwind_Context *);
+
+ /* Interface functions: */
+ _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp);
+ void __attribute__((noreturn)) _Unwind_Resume(_Unwind_Control_Block *ucbp);
+ _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (_Unwind_Control_Block *ucbp);
+
+ typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)
+ (int, _Unwind_Action, _Unwind_Exception_Class,
+ _Unwind_Control_Block *, struct _Unwind_Context *, void *);
+ _Unwind_Reason_Code _Unwind_ForcedUnwind (_Unwind_Control_Block *,
+ _Unwind_Stop_Fn, void *);
+ /* @@@ Use unwind data to perform a stack backtrace. The trace callback
+ is called for every stack frame in the call chain, but no cleanup
+ actions are performed. */
+ typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (_Unwind_Context *, void *);
+ _Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn,
+ void*);
+
+ _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *);
+ void _Unwind_Complete(_Unwind_Control_Block *ucbp);
+ void _Unwind_DeleteException (_Unwind_Exception *);
+
+ _Unwind_Reason_Code __gnu_unwind_frame (_Unwind_Control_Block *,
+ _Unwind_Context *);
+ _Unwind_Reason_Code __gnu_unwind_execute (_Unwind_Context *,
+ __gnu_unwind_state *);
+
+ static inline _Unwind_Word
+ _Unwind_GetGR (_Unwind_Context *context, int regno)
+ {
+ _uw val;
+ _Unwind_VRS_Get (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ return val;
+ }
+
+#define _Unwind_GetIPInfo(context, ip_before_insn) \
+ (*ip_before_insn = 0, _Unwind_GetIP (context))
+
+ static inline void
+ _Unwind_SetGR (_Unwind_Context *context, int regno, _Unwind_Word val)
+ {
+ _Unwind_VRS_Set (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val);
+ }
+
+ _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *);
+ void * _Unwind_GetLanguageSpecificData (_Unwind_Context *);
+
+/* leb128 type numbers have a potentially unlimited size.
+ The target of the following definitions of _sleb128_t and _uleb128_t
+ is to have efficient data types large enough to hold the leb128 type
+ numbers used in the unwind code. */
+typedef long _sleb128_t;
+typedef unsigned long _uleb128_t;
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* defined UNWIND_ARM_COMMON_H */
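[Editor's note] _Unwind_Backtrace and the _Unwind_GetGR accessor declared above are enough for a simple stack walk; the personality and VRS entry points are normally invoked only by the EH runtime itself. A hedged sketch, not from the toolchain sources; build with -funwind-tables so every frame carries ARM EHABI unwind info (libgcc supplies the unwinder):

#include <unwind.h>
#include <stdio.h>

static _Unwind_Reason_Code
trace_cb (_Unwind_Context *context, void *arg)
{
  int *depth = (int *) arg;
  /* Core register 15 holds this frame's return address during the walk.  */
  printf ("#%d pc=0x%lx\n", (*depth)++,
          (unsigned long) _Unwind_GetGR (context, 15));
  return _URC_NO_REASON;                /* keep walking to the next frame */
}

int main (void)
{
  int depth = 0;
  _Unwind_Backtrace (trace_cb, &depth);
  return 0;
}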
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind.h
new file mode 100644
index 0000000..782d175
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/unwind.h
@@ -0,0 +1,85 @@
+/* Header file for the ARM EABI unwinder
+ Copyright (C) 2003-2014 Free Software Foundation, Inc.
+ Contributed by Paul Brook
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Language-independent unwinder header public defines. This contains both
+ ABI defined objects, and GNU support routines. */
+
+#ifndef UNWIND_ARM_H
+#define UNWIND_ARM_H
+
+#include "unwind-arm-common.h"
+
+#define UNWIND_STACK_REG 13
+/* Use IP as a scratch register within the personality routine. */
+#define UNWIND_POINTER_REG 12
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /* Decode an R_ARM_TARGET2 relocation. */
+ static inline _Unwind_Word
+ _Unwind_decode_typeinfo_ptr (_Unwind_Word base __attribute__ ((unused)),
+ _Unwind_Word ptr)
+ {
+ _Unwind_Word tmp;
+
+ tmp = *(_Unwind_Word *) ptr;
+ /* Zero values are always NULL. */
+ if (!tmp)
+ return 0;
+
+#if (defined(linux) && !defined(__uClinux__)) || defined(__NetBSD__)
+ /* Pc-relative indirect. */
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel | DW_EH_PE_indirect)
+ tmp += ptr;
+ tmp = *(_Unwind_Word *) tmp;
+#elif defined(__symbian__) || defined(__uClinux__)
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_absptr)
+ /* Absolute pointer. Nothing more to do. */
+#else
+#define _GLIBCXX_OVERRIDE_TTYPE_ENCODING (DW_EH_PE_pcrel)
+ /* Pc-relative pointer. */
+ tmp += ptr;
+#endif
+ return tmp;
+ }
+
+ static inline _Unwind_Reason_Code
+ __gnu_unwind_24bit (_Unwind_Context * context __attribute__ ((unused)),
+ _uw data __attribute__ ((unused)),
+ int compact __attribute__ ((unused)))
+ {
+ return _URC_FAILURE;
+ }
+ /* Return the address of the instruction, not the actual IP value. */
+#define _Unwind_GetIP(context) \
+ (_Unwind_GetGR (context, 15) & ~(_Unwind_Word)1)
+
+#define _Unwind_SetIP(context, val) \
+ _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1))
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* defined UNWIND_ARM_H */
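[Editor's note] The ARM-specific wrapper above adds _Unwind_GetIP/_Unwind_SetIP on top of the common header, with bit 0 (the Thumb bit) masked off. A small hypothetical helper showing which accessor to prefer inside a trace or personality callback:

#include <unwind.h>

static _Unwind_Word __attribute__ ((unused))
frame_pc (_Unwind_Context *context)
{
  /* _Unwind_GetIP == _Unwind_GetGR (context, 15) & ~1 per the macro above,
     so Thumb-mode return addresses compare cleanly against code ranges.  */
  return _Unwind_GetIP (context);
}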
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/include/varargs.h b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/varargs.h
new file mode 100644
index 0000000..4b9803e
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/include/varargs.h
@@ -0,0 +1,7 @@
+#ifndef _VARARGS_H
+#define _VARARGS_H
+
+#error "GCC no longer implements <varargs.h>."
+#error "Revise your code to use <stdarg.h>."
+
+#endif
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/libgcc.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/libgcc.a
new file mode 100644
index 0000000..1d48aef
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/libgcov.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/libgcov.a
new file mode 100644
index 0000000..6753695
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/libgcov.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbegin.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbegin.o
new file mode 100644
index 0000000..7e34f33
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbegin.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginS.o
new file mode 100644
index 0000000..15c6854
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginT.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginT.o
new file mode 100644
index 0000000..7e34f33
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtend.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtend.o
new file mode 100644
index 0000000..d67f538
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtend.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtendS.o b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtendS.o
new file mode 100644
index 0000000..d67f538
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/crtendS.o
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcc.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcc.a
new file mode 100644
index 0000000..3e14f32
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcc.a
Binary files differ
diff --git a/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcov.a b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcov.a
new file mode 100644
index 0000000..908719a
--- /dev/null
+++ b/lib/gcc/arm-linux-androideabi/4.9.x-google/thumb/libgcov.a
Binary files differ
diff --git a/lib/libarm-linux-android-sim.a b/lib/libarm-linux-android-sim.a
new file mode 100644
index 0000000..6eb2635
--- /dev/null
+++ b/lib/libarm-linux-android-sim.a
Binary files differ
diff --git a/lib64/libiberty.a b/lib64/libiberty.a
new file mode 100644
index 0000000..11a8189
--- /dev/null
+++ b/lib64/libiberty.a
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/cc1 b/libexec/gcc/arm-linux-androideabi/4.9.x-google/cc1
new file mode 100755
index 0000000..d858335
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/cc1
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/cc1plus b/libexec/gcc/arm-linux-androideabi/4.9.x-google/cc1plus
new file mode 100755
index 0000000..9128fa4
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/cc1plus
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/collect2 b/libexec/gcc/arm-linux-androideabi/4.9.x-google/collect2
new file mode 100755
index 0000000..f0d007d
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/collect2
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so b/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so
new file mode 120000
index 0000000..6818e7a
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so
@@ -0,0 +1 @@
+libfunction_reordering_plugin.so.0.0.0
\ No newline at end of file
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.0 b/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.0
new file mode 120000
index 0000000..6818e7a
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.0
@@ -0,0 +1 @@
+libfunction_reordering_plugin.so.0.0.0
\ No newline at end of file
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.0.0.0 b/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.0.0.0
new file mode 100755
index 0000000..1f2391a
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/libfunction_reordering_plugin.so.0.0.0
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so b/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so
new file mode 120000
index 0000000..f25ba88
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so
@@ -0,0 +1 @@
+liblto_plugin.so.0.0.0
\ No newline at end of file
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.0 b/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.0
new file mode 120000
index 0000000..f25ba88
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.0
@@ -0,0 +1 @@
+liblto_plugin.so.0.0.0
\ No newline at end of file
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.0.0.0 b/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.0.0.0
new file mode 100755
index 0000000..20b5cfa
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/liblto_plugin.so.0.0.0
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/lto-wrapper b/libexec/gcc/arm-linux-androideabi/4.9.x-google/lto-wrapper
new file mode 100755
index 0000000..fa53c66
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/lto-wrapper
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/lto1 b/libexec/gcc/arm-linux-androideabi/4.9.x-google/lto1
new file mode 100755
index 0000000..13740ea
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/lto1
Binary files differ
diff --git a/libexec/gcc/arm-linux-androideabi/4.9.x-google/plugin/gengtype b/libexec/gcc/arm-linux-androideabi/4.9.x-google/plugin/gengtype
new file mode 100755
index 0000000..5b20dc9
--- /dev/null
+++ b/libexec/gcc/arm-linux-androideabi/4.9.x-google/plugin/gengtype
Binary files differ
diff --git a/share/gdb/python/gdb/__init__.py b/share/gdb/python/gdb/__init__.py
new file mode 100644
index 0000000..6311583
--- /dev/null
+++ b/share/gdb/python/gdb/__init__.py
@@ -0,0 +1,124 @@
+# Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import traceback
+import os
+import sys
+import _gdb
+
+if sys.version_info[0] > 2:
+ # Python 3 moved "reload"
+ from imp import reload
+
+from _gdb import *
+
+class _GdbFile (object):
+ # These two are needed in Python 3
+ encoding = "UTF-8"
+ errors = "strict"
+
+ def close(self):
+ # Do nothing.
+ return None
+
+ def isatty(self):
+ return False
+
+ def writelines(self, iterable):
+ for line in iterable:
+ self.write(line)
+
+ def flush(self):
+ flush()
+
+class GdbOutputFile (_GdbFile):
+ def write(self, s):
+ write(s, stream=STDOUT)
+
+sys.stdout = GdbOutputFile()
+
+class GdbOutputErrorFile (_GdbFile):
+ def write(self, s):
+ write(s, stream=STDERR)
+
+sys.stderr = GdbOutputErrorFile()
+
+# Default prompt hook does nothing.
+prompt_hook = None
+
+# Ensure that sys.argv is set to something.
+# We do not use PySys_SetArgvEx because it did not appear until 2.6.6.
+sys.argv = ['']
+
+# Initial pretty printers.
+pretty_printers = []
+
+# Initial type printers.
+type_printers = []
+
+# Convenience variable to GDB's python directory
+PYTHONDIR = os.path.dirname(os.path.dirname(__file__))
+
+# Auto-load all functions/commands.
+
+# Packages to auto-load.
+
+packages = [
+ 'function',
+ 'command'
+]
+
+# pkgutil.iter_modules is not available prior to Python 2.6. Instead,
+# manually iterate the list, collating the Python files in each module
+# path. Construct the module name, and import.
+
+def auto_load_packages():
+ for package in packages:
+ location = os.path.join(os.path.dirname(__file__), package)
+ if os.path.exists(location):
+ py_files = filter(lambda x: x.endswith('.py')
+ and x != '__init__.py',
+ os.listdir(location))
+
+ for py_file in py_files:
+ # Construct from foo.py, gdb.module.foo
+ modname = "%s.%s.%s" % ( __name__, package, py_file[:-3] )
+ try:
+ if modname in sys.modules:
+ # reload modules with duplicate names
+ reload(__import__(modname))
+ else:
+ __import__(modname)
+ except:
+ sys.stderr.write (traceback.format_exc() + "\n")
+
+auto_load_packages()
+
+def GdbSetPythonDirectory(dir):
+ """Update sys.path, reload gdb and auto-load packages."""
+ global PYTHONDIR
+
+ try:
+ sys.path.remove(PYTHONDIR)
+ except ValueError:
+ pass
+ sys.path.insert(0, dir)
+
+ PYTHONDIR = dir
+
+ # note that reload overwrites the gdb module without deleting existing
+ # attributes
+ reload(__import__(__name__))
+ auto_load_packages()
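
auto_load_packages() above simply imports every *.py file it finds under gdb/command/ and gdb/function/, so extending the bundled GDB only requires dropping a module into one of those directories. A minimal sketch, assuming a hypothetical file share/gdb/python/gdb/command/hello.py (the command name is illustrative and not part of this toolchain):

    # hello.py -- hypothetical module; auto_load_packages() imports it as
    # gdb.command.hello because it lives in the gdb/command/ directory.
    import gdb

    class HelloCommand(gdb.Command):
        """Print a fixed greeting.  Usage: hello"""

        def __init__(self):
            # gdb.COMMAND_DATA is used only because it is the category the
            # bundled scripts use; any command category would work here.
            super(HelloCommand, self).__init__("hello", gdb.COMMAND_DATA)

        def invoke(self, arg, from_tty):
            print("Hello from an auto-loaded command module")

    HelloCommand()
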
diff --git a/share/gdb/python/gdb/command/__init__.py b/share/gdb/python/gdb/command/__init__.py
new file mode 100644
index 0000000..21eaef8
--- /dev/null
+++ b/share/gdb/python/gdb/command/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
diff --git a/share/gdb/python/gdb/command/explore.py b/share/gdb/python/gdb/command/explore.py
new file mode 100644
index 0000000..dd77875
--- /dev/null
+++ b/share/gdb/python/gdb/command/explore.py
@@ -0,0 +1,760 @@
+# GDB 'explore' command.
+# Copyright (C) 2012-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Implementation of the GDB 'explore' command using the GDB Python API."""
+
+import gdb
+import sys
+
+if sys.version_info[0] > 2:
+ # Python 3 renamed raw_input to input
+ raw_input = input
+
+class Explorer(object):
+ """Internal class which invokes other explorers."""
+
+ # This map is filled by the Explorer.init_env() function
+ type_code_to_explorer_map = { }
+
+ _SCALAR_TYPE_LIST = (
+ gdb.TYPE_CODE_CHAR,
+ gdb.TYPE_CODE_INT,
+ gdb.TYPE_CODE_BOOL,
+ gdb.TYPE_CODE_FLT,
+ gdb.TYPE_CODE_VOID,
+ gdb.TYPE_CODE_ENUM,
+ )
+
+ @staticmethod
+ def guard_expr(expr):
+ length = len(expr)
+ guard = False
+
+ if expr[0] == '(' and expr[length-1] == ')':
+ pass
+ else:
+ i = 0
+ while i < length:
+ c = expr[i]
+ if (c == '_' or ('a' <= c and c <= 'z') or
+ ('A' <= c and c <= 'Z') or ('0' <= c and c <= '9')):
+ pass
+ else:
+ guard = True
+ break
+ i += 1
+
+ if guard:
+ return "(" + expr + ")"
+ else:
+ return expr
+
+ @staticmethod
+ def explore_expr(expr, value, is_child):
+ """Main function to explore an expression value.
+
+ Arguments:
+ expr: The expression string that is being explored.
+ value: The gdb.Value value of the expression.
+ is_child: Boolean value to indicate if the expression is a child.
+ An expression is a child if it is derived from the main
+ expression entered by the user. For example, if the user
+ entered an expression which evaluates to a struct, then
+ when exploring the fields of the struct, is_child is set
+ to True internally.
+
+ Returns:
+ No return value.
+ """
+ type_code = value.type.code
+ if type_code in Explorer.type_code_to_explorer_map:
+ explorer_class = Explorer.type_code_to_explorer_map[type_code]
+ while explorer_class.explore_expr(expr, value, is_child):
+ pass
+ else:
+ print ("Explorer for type '%s' not yet available.\n" %
+ str(value.type))
+
+ @staticmethod
+ def explore_type(name, datatype, is_child):
+ """Main function to explore a data type.
+
+ Arguments:
+ name: The string representing the path to the data type being
+ explored.
+ datatype: The gdb.Type value of the data type being explored.
+ is_child: Boolean value to indicate if the name is a child.
+ A name is a child if it is derived from the main name
+ entered by the user. For example, if the user entered
+ the name of struct type, then when exploring the fields
+ of the struct, is_child is set to True internally.
+
+ Returns:
+ No return value.
+ """
+ type_code = datatype.code
+ if type_code in Explorer.type_code_to_explorer_map:
+ explorer_class = Explorer.type_code_to_explorer_map[type_code]
+ while explorer_class.explore_type(name, datatype, is_child):
+ pass
+ else:
+ print ("Explorer for type '%s' not yet available.\n" %
+ str(datatype))
+
+ @staticmethod
+ def init_env():
+ """Initializes the Explorer environment.
+ This function should be invoked before starting any exploration. If
+ invoked before an exploration, it need not be invoked for subsequent
+ explorations.
+ """
+ Explorer.type_code_to_explorer_map = {
+ gdb.TYPE_CODE_CHAR : ScalarExplorer,
+ gdb.TYPE_CODE_INT : ScalarExplorer,
+ gdb.TYPE_CODE_BOOL : ScalarExplorer,
+ gdb.TYPE_CODE_FLT : ScalarExplorer,
+ gdb.TYPE_CODE_VOID : ScalarExplorer,
+ gdb.TYPE_CODE_ENUM : ScalarExplorer,
+ gdb.TYPE_CODE_STRUCT : CompoundExplorer,
+ gdb.TYPE_CODE_UNION : CompoundExplorer,
+ gdb.TYPE_CODE_PTR : PointerExplorer,
+ gdb.TYPE_CODE_REF : ReferenceExplorer,
+ gdb.TYPE_CODE_TYPEDEF : TypedefExplorer,
+ gdb.TYPE_CODE_ARRAY : ArrayExplorer
+ }
+
+ @staticmethod
+ def is_scalar_type(type):
+ """Checks whether a type is a scalar type.
+ A type is a scalar type if its type is
+ gdb.TYPE_CODE_CHAR or
+ gdb.TYPE_CODE_INT or
+ gdb.TYPE_CODE_BOOL or
+ gdb.TYPE_CODE_FLT or
+ gdb.TYPE_CODE_VOID or
+ gdb.TYPE_CODE_ENUM.
+
+ Arguments:
+ type: The type to be checked.
+
+ Returns:
+ 'True' if 'type' is a scalar type. 'False' otherwise.
+ """
+ return type.code in Explorer._SCALAR_TYPE_LIST
+
+ @staticmethod
+ def return_to_parent_value():
+ """A utility function which prints that the current exploration session
+ is returning to the parent value. Useful when exploring values.
+ """
+ print ("\nReturning to parent value...\n")
+
+ @staticmethod
+ def return_to_parent_value_prompt():
+ """A utility function which prompts the user to press the 'enter' key
+ so that the exploration session can shift back to the parent value.
+ Useful when exploring values.
+ """
+ raw_input("\nPress enter to return to parent value: ")
+
+ @staticmethod
+ def return_to_enclosing_type():
+ """A utility function which prints that the current exploration session
+ is returning to the enclosing type. Useful when exploring types.
+ """
+ print ("\nReturning to enclosing type...\n")
+
+ @staticmethod
+ def return_to_enclosing_type_prompt():
+ """A utility function which prompts the user to press the 'enter' key
+ so that the exploration session can shift back to the enclosing type.
+ Useful when exploring types.
+ """
+ raw_input("\nPress enter to return to enclosing type: ")
+
+
+class ScalarExplorer(object):
+ """Internal class used to explore scalar values."""
+
+ @staticmethod
+ def explore_expr(expr, value, is_child):
+ """Function to explore scalar values.
+ See Explorer.explore_expr and Explorer.is_scalar_type for more
+ information.
+ """
+ print ("'%s' is a scalar value of type '%s'." %
+ (expr, value.type))
+ print ("%s = %s" % (expr, str(value)))
+
+ if is_child:
+ Explorer.return_to_parent_value_prompt()
+ Explorer.return_to_parent_value()
+
+ return False
+
+ @staticmethod
+ def explore_type(name, datatype, is_child):
+ """Function to explore scalar types.
+ See Explorer.explore_type and Explorer.is_scalar_type for more
+ information.
+ """
+ if datatype.code == gdb.TYPE_CODE_ENUM:
+ if is_child:
+ print ("%s is of an enumerated type '%s'." %
+ (name, str(datatype)))
+ else:
+ print ("'%s' is an enumerated type." % name)
+ else:
+ if is_child:
+ print ("%s is of a scalar type '%s'." %
+ (name, str(datatype)))
+ else:
+ print ("'%s' is a scalar type." % name)
+
+ if is_child:
+ Explorer.return_to_enclosing_type_prompt()
+ Explorer.return_to_enclosing_type()
+
+ return False
+
+
+class PointerExplorer(object):
+ """Internal class used to explore pointer values."""
+
+ @staticmethod
+ def explore_expr(expr, value, is_child):
+ """Function to explore pointer values.
+ See Explorer.explore_expr for more information.
+ """
+ print ("'%s' is a pointer to a value of type '%s'" %
+ (expr, str(value.type.target())))
+ option = raw_input("Continue exploring it as a pointer to a single "
+ "value [y/n]: ")
+ if option == "y":
+ deref_value = None
+ try:
+ deref_value = value.dereference()
+ str(deref_value)
+ except gdb.MemoryError:
+ print ("'%s' a pointer pointing to an invalid memory "
+ "location." % expr)
+ if is_child:
+ Explorer.return_to_parent_value_prompt()
+ return False
+ Explorer.explore_expr("*%s" % Explorer.guard_expr(expr),
+ deref_value, is_child)
+ return False
+
+ option = raw_input("Continue exploring it as a pointer to an "
+ "array [y/n]: ")
+ if option == "y":
+ while True:
+ index = 0
+ try:
+ index = int(raw_input("Enter the index of the element you "
+ "want to explore in '%s': " % expr))
+ except ValueError:
+ break
+ element_expr = "%s[%d]" % (Explorer.guard_expr(expr), index)
+ element = value[index]
+ try:
+ str(element)
+ except gdb.MemoryError:
+ print ("Cannot read value at index %d." % index)
+ continue
+ Explorer.explore_expr(element_expr, element, True)
+ return False
+
+ if is_child:
+ Explorer.return_to_parent_value()
+ return False
+
+ @staticmethod
+ def explore_type(name, datatype, is_child):
+ """Function to explore pointer types.
+ See Explorer.explore_type for more information.
+ """
+ target_type = datatype.target()
+ print ("\n%s is a pointer to a value of type '%s'." %
+ (name, str(target_type)))
+
+ Explorer.explore_type("the pointee type of %s" % name,
+ target_type,
+ is_child)
+ return False
+
+
+class ReferenceExplorer(object):
+ """Internal class used to explore reference (TYPE_CODE_REF) values."""
+
+ @staticmethod
+ def explore_expr(expr, value, is_child):
+ """Function to explore array values.
+ See Explorer.explore_expr for more information.
+ """
+ referenced_value = value.referenced_value()
+ Explorer.explore_expr(expr, referenced_value, is_child)
+ return False
+
+ @staticmethod
+ def explore_type(name, datatype, is_child):
+ """Function to explore pointer types.
+ See Explorer.explore_type for more information.
+ """
+ target_type = datatype.target()
+ Explorer.explore_type(name, target_type, is_child)
+ return False
+
+
+class ArrayExplorer(object):
+ """Internal class used to explore arrays."""
+
+ @staticmethod
+ def explore_expr(expr, value, is_child):
+ """Function to explore array values.
+ See Explorer.explore_expr for more information.
+ """
+ target_type = value.type.target()
+ print ("'%s' is an array of '%s'." % (expr, str(target_type)))
+ index = 0
+ try:
+ index = int(raw_input("Enter the index of the element you want to "
+ "explore in '%s': " % expr))
+ except ValueError:
+ if is_child:
+ Explorer.return_to_parent_value()
+ return False
+
+ element = None
+ try:
+ element = value[index]
+ str(element)
+ except gdb.MemoryError:
+ print ("Cannot read value at index %d." % index)
+ raw_input("Press enter to continue... ")
+ return True
+
+ Explorer.explore_expr("%s[%d]" % (Explorer.guard_expr(expr), index),
+ element, True)
+ return True
+
+ @staticmethod
+ def explore_type(name, datatype, is_child):
+ """Function to explore array types.
+ See Explorer.explore_type for more information.
+ """
+ target_type = datatype.target()
+ print ("%s is an array of '%s'." % (name, str(target_type)))
+
+ Explorer.explore_type("the array element of %s" % name, target_type,
+ is_child)
+ return False
+
+
+class CompoundExplorer(object):
+ """Internal class used to explore struct, classes and unions."""
+
+ @staticmethod
+ def _print_fields(print_list):
+ """Internal function which prints the fields of a struct/class/union.
+ """
+ max_field_name_length = 0
+ for pair in print_list:
+ if max_field_name_length < len(pair[0]):
+ max_field_name_length = len(pair[0])
+
+ for pair in print_list:
+ print (" %*s = %s" % (max_field_name_length, pair[0], pair[1]))
+
+ @staticmethod
+ def _get_real_field_count(fields):
+ real_field_count = 0
+ for field in fields:
+ if not field.artificial:
+ real_field_count = real_field_count + 1
+
+ return real_field_count
+
+ @staticmethod
+ def explore_expr(expr, value, is_child):
+ """Function to explore structs/classes and union values.
+ See Explorer.explore_expr for more information.
+ """
+ datatype = value.type
+ type_code = datatype.code
+ fields = datatype.fields()
+
+ if type_code == gdb.TYPE_CODE_STRUCT:
+ type_desc = "struct/class"
+ else:
+ type_desc = "union"
+
+ if CompoundExplorer._get_real_field_count(fields) == 0:
+ print ("The value of '%s' is a %s of type '%s' with no fields." %
+ (expr, type_desc, str(value.type)))
+ if is_child:
+ Explorer.return_to_parent_value_prompt()
+ return False
+
+ print ("The value of '%s' is a %s of type '%s' with the following "
+ "fields:\n" % (expr, type_desc, str(value.type)))
+
+ has_explorable_fields = False
+ choice_to_compound_field_map = { }
+ current_choice = 0
+ print_list = [ ]
+ for field in fields:
+ if field.artificial:
+ continue
+ field_full_name = Explorer.guard_expr(expr) + "." + field.name
+ if field.is_base_class:
+ field_value = value.cast(field.type)
+ else:
+ field_value = value[field.name]
+ literal_value = ""
+ if type_code == gdb.TYPE_CODE_UNION:
+ literal_value = ("<Enter %d to explore this field of type "
+ "'%s'>" % (current_choice, str(field.type)))
+ has_explorable_fields = True
+ else:
+ if Explorer.is_scalar_type(field.type):
+ literal_value = ("%s .. (Value of type '%s')" %
+ (str(field_value), str(field.type)))
+ else:
+ if field.is_base_class:
+ field_desc = "base class"
+ else:
+ field_desc = "field"
+ literal_value = ("<Enter %d to explore this %s of type "
+ "'%s'>" %
+ (current_choice, field_desc,
+ str(field.type)))
+ has_explorable_fields = True
+
+ choice_to_compound_field_map[str(current_choice)] = (
+ field_full_name, field_value)
+ current_choice = current_choice + 1
+
+ print_list.append((field.name, literal_value))
+
+ CompoundExplorer._print_fields(print_list)
+ print ("")
+
+ if has_explorable_fields:
+ choice = raw_input("Enter the field number of choice: ")
+ if choice in choice_to_compound_field_map:
+ Explorer.explore_expr(choice_to_compound_field_map[choice][0],
+ choice_to_compound_field_map[choice][1],
+ True)
+ return True
+ else:
+ if is_child:
+ Explorer.return_to_parent_value()
+ else:
+ if is_child:
+ Explorer.return_to_parent_value_prompt()
+
+ return False
+
+ @staticmethod
+ def explore_type(name, datatype, is_child):
+ """Function to explore struct/class and union types.
+ See Explorer.explore_type for more information.
+ """
+ type_code = datatype.code
+ type_desc = ""
+ if type_code == gdb.TYPE_CODE_STRUCT:
+ type_desc = "struct/class"
+ else:
+ type_desc = "union"
+
+ fields = datatype.fields()
+ if CompoundExplorer._get_real_field_count(fields) == 0:
+ if is_child:
+ print ("%s is a %s of type '%s' with no fields." %
+ (name, type_desc, str(datatype)))
+ Explorer.return_to_enclosing_type_prompt()
+ else:
+ print ("'%s' is a %s with no fields." % (name, type_desc))
+ return False
+
+ if is_child:
+ print ("%s is a %s of type '%s' "
+ "with the following fields:\n" %
+ (name, type_desc, str(datatype)))
+ else:
+ print ("'%s' is a %s with the following "
+ "fields:\n" %
+ (name, type_desc))
+
+ has_explorable_fields = False
+ current_choice = 0
+ choice_to_compound_field_map = { }
+ print_list = [ ]
+ for field in fields:
+ if field.artificial:
+ continue
+ if field.is_base_class:
+ field_desc = "base class"
+ else:
+ field_desc = "field"
+ rhs = ("<Enter %d to explore this %s of type '%s'>" %
+ (current_choice, field_desc, str(field.type)))
+ print_list.append((field.name, rhs))
+ choice_to_compound_field_map[str(current_choice)] = (
+ field.name, field.type, field_desc)
+ current_choice = current_choice + 1
+
+ CompoundExplorer._print_fields(print_list)
+ print ("")
+
+ if len(choice_to_compound_field_map) > 0:
+ choice = raw_input("Enter the field number of choice: ")
+ if choice in choice_to_compound_field_map:
+ if is_child:
+ new_name = ("%s '%s' of %s" %
+ (choice_to_compound_field_map[choice][2],
+ choice_to_compound_field_map[choice][0],
+ name))
+ else:
+ new_name = ("%s '%s' of '%s'" %
+ (choice_to_compound_field_map[choice][2],
+ choice_to_compound_field_map[choice][0],
+ name))
+ Explorer.explore_type(new_name,
+ choice_to_compound_field_map[choice][1], True)
+ return True
+ else:
+ if is_child:
+ Explorer.return_to_enclosing_type()
+ else:
+ if is_child:
+ Explorer.return_to_enclosing_type_prompt()
+
+ return False
+
+
+class TypedefExplorer(object):
+ """Internal class used to explore values whose type is a typedef."""
+
+ @staticmethod
+ def explore_expr(expr, value, is_child):
+ """Function to explore typedef values.
+ See Explorer.explore_expr for more information.
+ """
+ actual_type = value.type.strip_typedefs()
+ print ("The value of '%s' is of type '%s' "
+ "which is a typedef of type '%s'" %
+ (expr, str(value.type), str(actual_type)))
+
+ Explorer.explore_expr(expr, value.cast(actual_type), is_child)
+ return False
+
+ @staticmethod
+ def explore_type(name, datatype, is_child):
+ """Function to explore typedef types.
+ See Explorer.explore_type for more information.
+ """
+ actual_type = datatype.strip_typedefs()
+ if is_child:
+ print ("The type of %s is a typedef of type '%s'." %
+ (name, str(actual_type)))
+ else:
+ print ("The type '%s' is a typedef of type '%s'." %
+ (name, str(actual_type)))
+
+ Explorer.explore_type(name, actual_type, is_child)
+ return False
+
+
+class ExploreUtils(object):
+ """Internal class which provides utilities for the main command classes."""
+
+ @staticmethod
+ def check_args(name, arg_str):
+ """Utility to check if adequate number of arguments are passed to an
+ explore command.
+
+ Arguments:
+ name: The name of the explore command.
+ arg_str: The argument string passed to the explore command.
+
+ Returns:
+ True if adequate arguments are passed, false otherwise.
+
+ Raises:
+ gdb.GdbError if adequate arguments are not passed.
+ """
+ if len(arg_str) < 1:
+ raise gdb.GdbError("ERROR: '%s' requires an argument."
+ % name)
+ return False
+ else:
+ return True
+
+ @staticmethod
+ def get_type_from_str(type_str):
+ """A utility function to deduce the gdb.Type value from a string
+ representing the type.
+
+ Arguments:
+ type_str: The type string from which the gdb.Type value should be
+ deduced.
+
+ Returns:
+ The deduced gdb.Type value if possible, None otherwise.
+ """
+ try:
+ # Assume the current language to be C/C++ and make a try.
+ return gdb.parse_and_eval("(%s *)0" % type_str).type.target()
+ except RuntimeError:
+ # If assumption of current language to be C/C++ was wrong, then
+ # lookup the type using the API.
+ try:
+ return gdb.lookup_type(type_str)
+ except RuntimeError:
+ return None
+
+ @staticmethod
+ def get_value_from_str(value_str):
+ """A utility function to deduce the gdb.Value value from a string
+ representing the value.
+
+ Arguments:
+ value_str: The value string from which the gdb.Value value should
+ be deduced.
+
+ Returns:
+ The deduced gdb.Value value if possible, None otherwise.
+ """
+ try:
+ return gdb.parse_and_eval(value_str)
+ except RuntimeError:
+ return None
+
+
+class ExploreCommand(gdb.Command):
+ """Explore a value or a type valid in the current context.
+
+ Usage:
+
+ explore ARG
+
+ - ARG is either a valid expression or a type name.
+ - At any stage of exploration, hit the return key (instead of a
+ choice, if any) to return to the enclosing type or value.
+ """
+
+ def __init__(self):
+ super(ExploreCommand, self).__init__(name = "explore",
+ command_class = gdb.COMMAND_DATA,
+ prefix = True)
+
+ def invoke(self, arg_str, from_tty):
+ if ExploreUtils.check_args("explore", arg_str) == False:
+ return
+
+ # Check if it is a value
+ value = ExploreUtils.get_value_from_str(arg_str)
+ if value is not None:
+ Explorer.explore_expr(arg_str, value, False)
+ return
+
+ # If it is not a value, check if it is a type
+ datatype = ExploreUtils.get_type_from_str(arg_str)
+ if datatype is not None:
+ Explorer.explore_type(arg_str, datatype, False)
+ return
+
+ # If it is neither a value nor a type, raise an error.
+ raise gdb.GdbError(
+ ("'%s' neither evaluates to a value nor is a type "
+ "in the current context." %
+ arg_str))
+
+
+class ExploreValueCommand(gdb.Command):
+ """Explore value of an expression valid in the current context.
+
+ Usage:
+
+ explore value ARG
+
+ - ARG is a valid expression.
+ - At any stage of exploration, hit the return key (instead of a
+ choice, if any) to return to the enclosing value.
+ """
+
+ def __init__(self):
+ super(ExploreValueCommand, self).__init__(
+ name = "explore value", command_class = gdb.COMMAND_DATA)
+
+ def invoke(self, arg_str, from_tty):
+ if ExploreUtils.check_args("explore value", arg_str) == False:
+ return
+
+ value = ExploreUtils.get_value_from_str(arg_str)
+ if value is None:
+ raise gdb.GdbError(
+ (" '%s' does not evaluate to a value in the current "
+ "context." %
+ arg_str))
+ return
+
+ Explorer.explore_expr(arg_str, value, False)
+
+
+class ExploreTypeCommand(gdb.Command):
+ """Explore a type or the type of an expression valid in the current
+ context.
+
+ Usage:
+
+ explore type ARG
+
+ - ARG is a valid expression or a type name.
+ - At any stage of exploration, hit the return key (instead of a
+ choice, if any) to return to the enclosing type.
+ """
+
+ def __init__(self):
+ super(ExploreTypeCommand, self).__init__(
+ name = "explore type", command_class = gdb.COMMAND_DATA)
+
+ def invoke(self, arg_str, from_tty):
+ if ExploreUtils.check_args("explore type", arg_str) == False:
+ return
+
+ datatype = ExploreUtils.get_type_from_str(arg_str)
+ if datatype is not None:
+ Explorer.explore_type(arg_str, datatype, False)
+ return
+
+ value = ExploreUtils.get_value_from_str(arg_str)
+ if value is not None:
+ print ("'%s' is of type '%s'." % (arg_str, str(value.type)))
+ Explorer.explore_type(str(value.type), value.type, False)
+ return
+
+ raise gdb.GdbError(("'%s' is not a type or value in the current "
+ "context." % arg_str))
+
+
+Explorer.init_env()
+
+ExploreCommand()
+ExploreValueCommand()
+ExploreTypeCommand()
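
Every explorer above follows the same contract: static explore_expr(expr, value, is_child) and explore_type(name, datatype, is_child) methods that return True to stay at the current level and False to return to the parent, registered in Explorer.type_code_to_explorer_map keyed by type code. A minimal sketch of that shape for a type code the file does not handle (gdb.TYPE_CODE_FUNC); this is illustrative only and not part of the shipped explore.py:

    # Hypothetical explorer following the contract used by the classes above.
    class FunctionExplorer(object):
        """Illustrative explorer for function types."""

        @staticmethod
        def explore_expr(expr, value, is_child):
            print("'%s' is a function of type '%s'." % (expr, str(value.type)))
            if is_child:
                Explorer.return_to_parent_value_prompt()
                Explorer.return_to_parent_value()
            return False   # nothing deeper to descend into

        @staticmethod
        def explore_type(name, datatype, is_child):
            print("%s is a function type '%s'." % (name, str(datatype)))
            if is_child:
                Explorer.return_to_enclosing_type_prompt()
                Explorer.return_to_enclosing_type()
            return False

    # Wiring it up would be one extra entry in the map built by init_env():
    #   Explorer.type_code_to_explorer_map[gdb.TYPE_CODE_FUNC] = FunctionExplorer
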
diff --git a/share/gdb/python/gdb/command/pretty_printers.py b/share/gdb/python/gdb/command/pretty_printers.py
new file mode 100644
index 0000000..7b03e3a
--- /dev/null
+++ b/share/gdb/python/gdb/command/pretty_printers.py
@@ -0,0 +1,368 @@
+# Pretty-printer commands.
+# Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""GDB commands for working with pretty-printers."""
+
+import copy
+import gdb
+import re
+
+
+def parse_printer_regexps(arg):
+ """Internal utility to parse a pretty-printer command argv.
+
+ Arguments:
+ arg: The arguments to the command. The format is:
+ [object-regexp [name-regexp]].
+ Individual printers in a collection are named as
+ printer-name;subprinter-name.
+
+ Returns:
+ The result is a 3-tuple of compiled regular expressions, except that
+ the resulting compiled subprinter regexp is None if not provided.
+
+ Raises:
+ SyntaxError: an error processing ARG
+ """
+
+ argv = gdb.string_to_argv(arg)
+ argc = len(argv)
+ object_regexp = "" # match everything
+ name_regexp = "" # match everything
+ subname_regexp = None
+ if argc > 3:
+ raise SyntaxError("too many arguments")
+ if argc >= 1:
+ object_regexp = argv[0]
+ if argc >= 2:
+ name_subname = argv[1].split(";", 1)
+ name_regexp = name_subname[0]
+ if len(name_subname) == 2:
+ subname_regexp = name_subname[1]
+ # That re.compile raises SyntaxError was determined empirically.
+ # We catch it and reraise it to provide a slightly more useful
+ # error message for the user.
+ try:
+ object_re = re.compile(object_regexp)
+ except SyntaxError:
+ raise SyntaxError("invalid object regexp: %s" % object_regexp)
+ try:
+ name_re = re.compile (name_regexp)
+ except SyntaxError:
+ raise SyntaxError("invalid name regexp: %s" % name_regexp)
+ if subname_regexp is not None:
+ try:
+ subname_re = re.compile(subname_regexp)
+ except SyntaxError:
+ raise SyntaxError("invalid subname regexp: %s" % subname_regexp)
+ else:
+ subname_re = None
+ return(object_re, name_re, subname_re)
+
+
+def printer_enabled_p(printer):
+ """Internal utility to see if printer (or subprinter) is enabled."""
+ if hasattr(printer, "enabled"):
+ return printer.enabled
+ else:
+ return True
+
+
+class InfoPrettyPrinter(gdb.Command):
+ """GDB command to list all registered pretty-printers.
+
+ Usage: info pretty-printer [object-regexp [name-regexp]]
+
+ OBJECT-REGEXP is a regular expression matching the objects to list.
+ Objects are "global", the program space's file, and the objfiles within
+ that program space.
+
+ NAME-REGEXP matches the name of the pretty-printer.
+ Individual printers in a collection are named as
+ printer-name;subprinter-name.
+ """
+
+ def __init__ (self):
+ super(InfoPrettyPrinter, self).__init__("info pretty-printer",
+ gdb.COMMAND_DATA)
+
+ @staticmethod
+ def enabled_string(printer):
+ """Return "" if PRINTER is enabled, otherwise " [disabled]"."""
+ if printer_enabled_p(printer):
+ return ""
+ else:
+ return " [disabled]"
+
+ @staticmethod
+ def printer_name(printer):
+ """Return the printer's name."""
+ if hasattr(printer, "name"):
+ return printer.name
+ if hasattr(printer, "__name__"):
+ return printer.__name__
+ # This "shouldn't happen", but the public API allows for
+ # direct additions to the pretty-printer list, and we shouldn't
+ # crash because someone added a bogus printer.
+ # Plus we want to give the user a way to list unknown printers.
+ return "unknown"
+
+ def list_pretty_printers(self, pretty_printers, name_re, subname_re):
+ """Print a list of pretty-printers."""
+ # A potential enhancement is to provide an option to list printers in
+ # "lookup order" (i.e. unsorted).
+ sorted_pretty_printers = sorted (copy.copy(pretty_printers),
+ key = self.printer_name)
+ for printer in sorted_pretty_printers:
+ name = self.printer_name(printer)
+ enabled = self.enabled_string(printer)
+ if name_re.match(name):
+ print (" %s%s" % (name, enabled))
+ if (hasattr(printer, "subprinters") and
+ printer.subprinters is not None):
+ sorted_subprinters = sorted (copy.copy(printer.subprinters),
+ key = self.printer_name)
+ for subprinter in sorted_subprinters:
+ if (not subname_re or
+ subname_re.match(subprinter.name)):
+ print (" %s%s" %
+ (subprinter.name,
+ self.enabled_string(subprinter)))
+
+ def invoke1(self, title, printer_list,
+ obj_name_to_match, object_re, name_re, subname_re):
+ """Subroutine of invoke to simplify it."""
+ if printer_list and object_re.match(obj_name_to_match):
+ print (title)
+ self.list_pretty_printers(printer_list, name_re, subname_re)
+
+ def invoke(self, arg, from_tty):
+ """GDB calls this to perform the command."""
+ (object_re, name_re, subname_re) = parse_printer_regexps(arg)
+ self.invoke1("global pretty-printers:", gdb.pretty_printers,
+ "global", object_re, name_re, subname_re)
+ cp = gdb.current_progspace()
+ self.invoke1("progspace %s pretty-printers:" % cp.filename,
+ cp.pretty_printers, "progspace",
+ object_re, name_re, subname_re)
+ for objfile in gdb.objfiles():
+ self.invoke1(" objfile %s pretty-printers:" % objfile.filename,
+ objfile.pretty_printers, objfile.filename,
+ object_re, name_re, subname_re)
+
+
+def count_enabled_printers(pretty_printers):
+ """Return a 2-tuple of number of enabled and total printers."""
+ enabled = 0
+ total = 0
+ for printer in pretty_printers:
+ if (hasattr(printer, "subprinters")
+ and printer.subprinters is not None):
+ if printer_enabled_p(printer):
+ for subprinter in printer.subprinters:
+ if printer_enabled_p(subprinter):
+ enabled += 1
+ total += len(printer.subprinters)
+ else:
+ if printer_enabled_p(printer):
+ enabled += 1
+ total += 1
+ return (enabled, total)
+
+
+def count_all_enabled_printers():
+ """Return a 2-tuble of the enabled state and total number of all printers.
+ This includes subprinters.
+ """
+ enabled_count = 0
+ total_count = 0
+ (t_enabled, t_total) = count_enabled_printers(gdb.pretty_printers)
+ enabled_count += t_enabled
+ total_count += t_total
+ (t_enabled, t_total) = count_enabled_printers(gdb.current_progspace().pretty_printers)
+ enabled_count += t_enabled
+ total_count += t_total
+ for objfile in gdb.objfiles():
+ (t_enabled, t_total) = count_enabled_printers(objfile.pretty_printers)
+ enabled_count += t_enabled
+ total_count += t_total
+ return (enabled_count, total_count)
+
+
+def pluralize(text, n, suffix="s"):
+ """Return TEXT pluralized if N != 1."""
+ if n != 1:
+ return "%s%s" % (text, suffix)
+ else:
+ return text
+
+
+def show_pretty_printer_enabled_summary():
+ """Print the number of printers enabled/disabled.
+ We count subprinters individually.
+ """
+ (enabled_count, total_count) = count_all_enabled_printers()
+ print ("%d of %d printers enabled" % (enabled_count, total_count))
+
+
+def do_enable_pretty_printer_1 (pretty_printers, name_re, subname_re, flag):
+ """Worker for enabling/disabling pretty-printers.
+
+ Arguments:
+ pretty_printers: list of pretty-printers
+ name_re: regular-expression object to select printers
+ subname_re: regular expression object to select subprinters or None
+ if all are affected
+ flag: True for Enable, False for Disable
+
+ Returns:
+ The number of printers affected.
+ This is just for informational purposes for the user.
+ """
+ total = 0
+ for printer in pretty_printers:
+ if (hasattr(printer, "name") and name_re.match(printer.name) or
+ hasattr(printer, "__name__") and name_re.match(printer.__name__)):
+ if (hasattr(printer, "subprinters") and
+ printer.subprinters is not None):
+ if not subname_re:
+ # Only record printers that change state.
+ if printer_enabled_p(printer) != flag:
+ for subprinter in printer.subprinters:
+ if printer_enabled_p(subprinter):
+ total += 1
+ # NOTE: We preserve individual subprinter settings.
+ printer.enabled = flag
+ else:
+ # NOTE: Whether this actually disables the subprinter
+ # depends on whether the printer's lookup function supports
+ # the "enable" API. We can only assume it does.
+ for subprinter in printer.subprinters:
+ if subname_re.match(subprinter.name):
+ # Only record printers that change state.
+ if (printer_enabled_p(printer) and
+ printer_enabled_p(subprinter) != flag):
+ total += 1
+ subprinter.enabled = flag
+ else:
+ # This printer has no subprinters.
+ # If the user does "disable pretty-printer .* .* foo"
+ # should we disable printers that don't have subprinters?
+ # How do we apply "foo" in this context? Since there is no
+ # "foo" subprinter it feels like we should skip this printer.
+ # There's still the issue of how to handle
+ # "disable pretty-printer .* .* .*", and every other variation
+ # that can match everything. For now punt and only support
+ # "disable pretty-printer .* .*" (i.e. subname is elided)
+ # to disable everything.
+ if not subname_re:
+ # Only record printers that change state.
+ if printer_enabled_p(printer) != flag:
+ total += 1
+ printer.enabled = flag
+ return total
+
+
+def do_enable_pretty_printer (arg, flag):
+ """Internal worker for enabling/disabling pretty-printers."""
+ (object_re, name_re, subname_re) = parse_printer_regexps(arg)
+
+ total = 0
+ if object_re.match("global"):
+ total += do_enable_pretty_printer_1(gdb.pretty_printers,
+ name_re, subname_re, flag)
+ cp = gdb.current_progspace()
+ if object_re.match("progspace"):
+ total += do_enable_pretty_printer_1(cp.pretty_printers,
+ name_re, subname_re, flag)
+ for objfile in gdb.objfiles():
+ if object_re.match(objfile.filename):
+ total += do_enable_pretty_printer_1(objfile.pretty_printers,
+ name_re, subname_re, flag)
+
+ if flag:
+ state = "enabled"
+ else:
+ state = "disabled"
+ print ("%d %s %s" % (total, pluralize("printer", total), state))
+
+ # Print the total list of printers currently enabled/disabled.
+ # This is to further assist the user in determining whether the result
+ # is expected. Since printers are selected with regexps, this summary helps.
+ show_pretty_printer_enabled_summary()
+
+
+# Enable/Disable one or more pretty-printers.
+#
+# This is intended for use when a broken pretty-printer is shipped/installed
+# and the user wants to disable that printer without disabling all the other
+# printers.
+#
+# A useful addition would be -v (verbose) to show each printer affected.
+
+class EnablePrettyPrinter (gdb.Command):
+ """GDB command to enable the specified pretty-printer.
+
+ Usage: enable pretty-printer [object-regexp [name-regexp]]
+
+ OBJECT-REGEXP is a regular expression matching the objects to examine.
+ Objects are "global", the program space's file, and the objfiles within
+ that program space.
+
+ NAME-REGEXP matches the name of the pretty-printer.
+ Individual printers in a collection are named as
+ printer-name;subprinter-name.
+ """
+
+ def __init__(self):
+ super(EnablePrettyPrinter, self).__init__("enable pretty-printer",
+ gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ """GDB calls this to perform the command."""
+ do_enable_pretty_printer(arg, True)
+
+
+class DisablePrettyPrinter (gdb.Command):
+ """GDB command to disable the specified pretty-printer.
+
+ Usage: disable pretty-printer [object-regexp [name-regexp]]
+
+ OBJECT-REGEXP is a regular expression matching the objects to examine.
+ Objects are "global", the program space's file, and the objfiles within
+ that program space.
+
+ NAME-REGEXP matches the name of the pretty-printer.
+ Individual printers in a collection are named as
+ printer-name;subprinter-name.
+ """
+
+ def __init__(self):
+ super(DisablePrettyPrinter, self).__init__("disable pretty-printer",
+ gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+ """GDB calls this to perform the command."""
+ do_enable_pretty_printer(arg, False)
+
+
+def register_pretty_printer_commands():
+ """Call from a top level script to install the pretty-printer commands."""
+ InfoPrettyPrinter()
+ EnablePrettyPrinter()
+ DisablePrettyPrinter()
+
+register_pretty_printer_commands()
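
All three commands above parse their arguments with parse_printer_regexps(), so they share the [object-regexp [name-regexp]] form, with members of a collection addressed as printer-name;subprinter-name. A sketch of driving them from Python via gdb.execute(); the printer names are hypothetical and only illustrate the argument format:

    # Illustrative invocations of the commands registered above.
    import gdb

    gdb.execute("info pretty-printer")                       # list all printers
    gdb.execute("disable pretty-printer global my_library")  # a whole collection
    gdb.execute("enable pretty-printer global my_library;StringPiece")  # one subprinter
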
diff --git a/share/gdb/python/gdb/command/prompt.py b/share/gdb/python/gdb/command/prompt.py
new file mode 100644
index 0000000..394e40c
--- /dev/null
+++ b/share/gdb/python/gdb/command/prompt.py
@@ -0,0 +1,66 @@
+# Extended prompt.
+# Copyright (C) 2011-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""GDB command for working with extended prompts."""
+
+import gdb
+import gdb.prompt
+
+class _ExtendedPrompt(gdb.Parameter):
+
+ """Set the extended prompt.
+
+Usage: set extended-prompt VALUE
+
+Substitutions are applied to VALUE to compute the real prompt.
+
+The currently defined substitutions are:
+
+"""
+ # Add the prompt library's dynamically generated help to the
+ # __doc__ string.
+ __doc__ = __doc__ + gdb.prompt.prompt_help()
+
+ set_doc = "Set the extended prompt."
+ show_doc = "Show the extended prompt."
+
+ def __init__(self):
+ super(_ExtendedPrompt, self).__init__("extended-prompt",
+ gdb.COMMAND_SUPPORT,
+ gdb.PARAM_STRING_NOESCAPE)
+ self.value = ''
+ self.hook_set = False
+
+ def get_show_string (self, pvalue):
+ if self.value != '':
+ return "The extended prompt is: " + self.value
+ else:
+ return "The extended prompt is not set."
+
+ def get_set_string (self):
+ if self.hook_set == False:
+ gdb.prompt_hook = self.before_prompt_hook
+ self.hook_set = True
+ return ""
+
+ def before_prompt_hook(self, current):
+ if self.value != '':
+ newprompt = gdb.prompt.substitute_prompt(self.value)
+ return newprompt.replace('\\', '\\\\')
+ else:
+ return None
+
+_ExtendedPrompt()
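
The parameter above installs its before_prompt_hook the first time a value is set, and gdb.prompt.substitute_prompt() expands the escape sequences reported by gdb.prompt.prompt_help(). A minimal sketch of exercising it from Python; the particular escape used is an assumption drawn from that help text, not something defined in this file:

    # Illustrative only: exercises the extended-prompt parameter defined above.
    # The escape below is assumed to be one of the substitutions reported by
    # gdb.prompt.prompt_help(); print that help to see the real list.
    import gdb
    import gdb.prompt

    print(gdb.prompt.prompt_help())              # show available substitutions
    gdb.execute("set extended-prompt \\w> ")     # e.g. a working-directory escape
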
diff --git a/share/gdb/python/gdb/command/type_printers.py b/share/gdb/python/gdb/command/type_printers.py
new file mode 100644
index 0000000..81f2ea1
--- /dev/null
+++ b/share/gdb/python/gdb/command/type_printers.py
@@ -0,0 +1,125 @@
+# Type printer commands.
+# Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""GDB commands for working with type-printers."""
+
+import copy
+import gdb
+
+class InfoTypePrinter(gdb.Command):
+ """GDB command to list all registered type-printers.
+
+ Usage: info type-printers
+ """
+
+ def __init__ (self):
+ super(InfoTypePrinter, self).__init__("info type-printers",
+ gdb.COMMAND_DATA)
+
+ def list_type_printers(self, type_printers):
+ """Print a list of type printers."""
+ # A potential enhancement is to provide an option to list printers in
+ # "lookup order" (i.e. unsorted).
+ sorted_type_printers = sorted (copy.copy(type_printers),
+ key = lambda x: x.name)
+ for printer in sorted_type_printers:
+ if printer.enabled:
+ enabled = ''
+ else:
+ enabled = " [disabled]"
+ print (" %s%s" % (printer.name, enabled))
+
+ def invoke(self, arg, from_tty):
+ """GDB calls this to perform the command."""
+ sep = ''
+ for objfile in gdb.objfiles():
+ if objfile.type_printers:
+ print ("%sType printers for %s:" % (sep, objfile.name))
+ self.list_type_printers(objfile.type_printers)
+ sep = '\n'
+ if gdb.current_progspace().type_printers:
+ print ("%sType printers for program space:" % sep)
+ self.list_type_printers(gdb.current_progspace().type_printers)
+ sep = '\n'
+ if gdb.type_printers:
+ print ("%sGlobal type printers:" % sep)
+ self.list_type_printers(gdb.type_printers)
+
+class _EnableOrDisableCommand(gdb.Command):
+ def __init__(self, setting, name):
+ super(_EnableOrDisableCommand, self).__init__(name, gdb.COMMAND_DATA)
+ self.setting = setting
+
+ def set_some(self, name, printers):
+ result = False
+ for p in printers:
+ if name == p.name:
+ p.enabled = self.setting
+ result = True
+ return result
+
+ def invoke(self, arg, from_tty):
+ """GDB calls this to perform the command."""
+ for name in arg.split():
+ ok = False
+ for objfile in gdb.objfiles():
+ if self.set_some(name, objfile.type_printers):
+ ok = True
+ if self.set_some(name, gdb.current_progspace().type_printers):
+ ok = True
+ if self.set_some(name, gdb.type_printers):
+ ok = True
+ if not ok:
+ print ("No type printer named '%s'" % name)
+
+ def add_some(self, result, word, printers):
+ for p in printers:
+ if p.name.startswith(word):
+ result.append(p.name)
+
+ def complete(self, text, word):
+ result = []
+ for objfile in gdb.objfiles():
+ self.add_some(result, word, objfile.type_printers)
+ self.add_some(result, word, gdb.current_progspace().type_printers)
+ self.add_some(result, word, gdb.type_printers)
+ return result
+
+class EnableTypePrinter(_EnableOrDisableCommand):
+ """GDB command to enable the specified type printer.
+
+ Usage: enable type-printer NAME
+
+ NAME is the name of the type-printer.
+ """
+
+ def __init__(self):
+ super(EnableTypePrinter, self).__init__(True, "enable type-printer")
+
+class DisableTypePrinter(_EnableOrDisableCommand):
+ """GDB command to disable the specified type-printer.
+
+ Usage: disable type-printer NAME
+
+ NAME is the name of the type-printer.
+ """
+
+ def __init__(self):
+ super(DisableTypePrinter, self).__init__(False, "disable type-printer")
+
+InfoTypePrinter()
+EnableTypePrinter()
+DisableTypePrinter()
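
The enable/disable commands above only rely on each registered type printer exposing name and enabled attributes and living in gdb.type_printers or one of the per-progspace/objfile lists. A sketch of the minimal object these commands can manage; the printer name is hypothetical, and the recognizer machinery a real type printer would also provide is deliberately omitted:

    # Hypothetical minimal type printer: just the attributes the commands above
    # read and toggle.  A real printer would also supply the pieces gdb.types
    # uses to actually substitute type names; that part is omitted here.
    import gdb

    class DemoTypePrinter(object):
        def __init__(self):
            self.name = "demo-ints"   # listed by "info type-printers"
            self.enabled = True       # toggled by enable/disable type-printer

    gdb.type_printers.append(DemoTypePrinter())
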
diff --git a/share/gdb/python/gdb/function/__init__.py b/share/gdb/python/gdb/function/__init__.py
new file mode 100644
index 0000000..755bff9
--- /dev/null
+++ b/share/gdb/python/gdb/function/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (C) 2012-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/share/gdb/python/gdb/function/strfns.py b/share/gdb/python/gdb/function/strfns.py
new file mode 100644
index 0000000..efdf950
--- /dev/null
+++ b/share/gdb/python/gdb/function/strfns.py
@@ -0,0 +1,108 @@
+# Useful gdb string convenience functions.
+# Copyright (C) 2012-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""$_memeq, $_strlen, $_streq, $_regex"""
+
+import gdb
+import re
+
+
+class _MemEq(gdb.Function):
+ """$_memeq - compare bytes of memory
+
+Usage:
+ $_memeq(a, b, len)
+
+Returns:
+ True if len bytes at a and b compare equally.
+"""
+ def __init__(self):
+ super(_MemEq, self).__init__("_memeq")
+
+ def invoke(self, a, b, length):
+ if length < 0:
+ raise ValueError("length must be non-negative")
+ if length == 0:
+ return True
+ # The argument(s) to vector are [low_bound,]high_bound.
+ byte_vector = gdb.lookup_type("char").vector(length - 1)
+ ptr_byte_vector = byte_vector.pointer()
+ a_ptr = a.reinterpret_cast(ptr_byte_vector)
+ b_ptr = b.reinterpret_cast(ptr_byte_vector)
+ return a_ptr.dereference() == b_ptr.dereference()
+
+
+class _StrLen(gdb.Function):
+ """$_strlen - compute string length
+
+Usage:
+ $_strlen(a)
+
+Returns:
+ Length of string a, assumed to be a string in the current language.
+"""
+ def __init__(self):
+ super(_StrLen, self).__init__("_strlen")
+
+ def invoke(self, a):
+ s = a.string()
+ return len(s)
+
+
+class _StrEq(gdb.Function):
+ """$_streq - check string equality
+
+Usage:
+ $_streq(a, b)
+
+Returns:
+ True if a and b are identical strings in the current language.
+
+Example (amd64-linux):
+ catch syscall open
+ cond $bpnum $_streq((char*) $rdi, "foo")
+"""
+ def __init__(self):
+ super(_StrEq, self).__init__("_streq")
+
+ def invoke(self, a, b):
+ return a.string() == b.string()
+
+
+class _RegEx(gdb.Function):
+ """$_regex - check if a string matches a regular expression
+
+Usage:
+ $_regex(string, regex)
+
+Returns:
+ True if the string (in the current language) matches the
+ regular expression regex.
+"""
+ def __init__(self):
+ super(_RegEx, self).__init__("_regex")
+
+ def invoke(self, string, regex):
+ s = string.string()
+ r = re.compile(regex.string())
+ return bool(r.match(s))
+
+
+# GDB will import us automagically via gdb/__init__.py.
+_MemEq()
+_StrLen()
+_StrEq()
+_RegEx()
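
Each helper above is a gdb.Function subclass: the string passed to the constructor (minus the leading '$') becomes the convenience-function name, and invoke() receives gdb.Value arguments. A sketch of one more helper in the same style; $_strcaseeq is hypothetical and not part of the shipped strfns.py:

    # Hypothetical convenience function in the same style as _StrEq above.
    import gdb

    class _StrCaseEq(gdb.Function):
        """$_strcaseeq - case-insensitive string equality

    Usage:
      $_strcaseeq(a, b)
    """
        def __init__(self):
            super(_StrCaseEq, self).__init__("_strcaseeq")

        def invoke(self, a, b):
            # gdb.Value.string() decodes the target string in the current language.
            return a.string().lower() == b.string().lower()

    _StrCaseEq()
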
diff --git a/share/gdb/python/gdb/printing.py b/share/gdb/python/gdb/printing.py
new file mode 100644
index 0000000..785a407
--- /dev/null
+++ b/share/gdb/python/gdb/printing.py
@@ -0,0 +1,263 @@
+# Pretty-printer utilities.
+# Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Utilities for working with pretty-printers."""
+
+import gdb
+import gdb.types
+import re
+import sys
+
+if sys.version_info[0] > 2:
+ # Python 3 removed basestring and long
+ basestring = str
+ long = int
+
+class PrettyPrinter(object):
+ """A basic pretty-printer.
+
+ Attributes:
+ name: A unique string among all printers for the context in which
+ it is defined (objfile, progspace, or global(gdb)), and should
+ meaningfully describe what can be pretty-printed.
+ E.g., "StringPiece" or "protobufs".
+ subprinters: An iterable object with each element having a `name'
+ attribute, and, potentially, "enabled" attribute.
+ Or this is None if there are no subprinters.
+ enabled: A boolean indicating if the printer is enabled.
+
+ Subprinters are for situations where "one" pretty-printer is actually a
+ collection of several printers. E.g., The libstdc++ pretty-printer has
+ a pretty-printer for each of several different types, based on regexps.
+ """
+
+ # While one might want to push subprinters into the subclass, it's
+ # present here to formalize such support to simplify
+ # commands/pretty_printers.py.
+
+ def __init__(self, name, subprinters=None):
+ self.name = name
+ self.subprinters = subprinters
+ self.enabled = True
+
+ def __call__(self, val):
+ # The subclass must define this.
+ raise NotImplementedError("PrettyPrinter __call__")
+
+
+class SubPrettyPrinter(object):
+ """Baseclass for sub-pretty-printers.
+
+ Sub-pretty-printers needn't use this, but it formalizes what's needed.
+
+ Attributes:
+ name: The name of the subprinter.
+ enabled: A boolean indicating if the subprinter is enabled.
+ """
+
+ def __init__(self, name):
+ self.name = name
+ self.enabled = True
+
+
+def register_pretty_printer(obj, printer, replace=False):
+ """Register pretty-printer PRINTER with OBJ.
+
+ The printer is added to the front of the search list, thus one can override
+ an existing printer if one needs to. Use a different name when overriding
+ an existing printer, otherwise an exception will be raised; multiple
+ printers with the same name are disallowed.
+
+ Arguments:
+ obj: Either an objfile, progspace, or None (in which case the printer
+ is registered globally).
+ printer: Either a function of one argument (old way) or any object
+ which has attributes: name, enabled, __call__.
+ replace: If True replace any existing copy of the printer.
+ Otherwise if the printer already exists raise an exception.
+
+ Returns:
+ Nothing.
+
+ Raises:
+ TypeError: A problem with the type of the printer.
+ ValueError: The printer's name contains a semicolon ";".
+ RuntimeError: A printer with the same name is already registered.
+
+ If the caller wants the printer to be listable and disableable, it must
+ follow the PrettyPrinter API. This applies to the old way (functions) too.
+ If printer is an object, __call__ is a method of two arguments:
+ self, and the value to be pretty-printed. See PrettyPrinter.
+ """
+
+ # Watch for both __name__ and name.
+ # Functions get the former for free, but we don't want to use an
+ # attribute named __foo__ for pretty-printers-as-objects.
+ # If printer has both, we use `name'.
+ if not hasattr(printer, "__name__") and not hasattr(printer, "name"):
+ raise TypeError("printer missing attribute: name")
+ if hasattr(printer, "name") and not hasattr(printer, "enabled"):
+ raise TypeError("printer missing attribute: enabled")
+ if not hasattr(printer, "__call__"):
+ raise TypeError("printer missing attribute: __call__")
+
+    # The checks above guarantee at least one of `name'/__name__ is present.
+    name = getattr(printer, "name", getattr(printer, "__name__", None))
+    if obj is None:
+        if gdb.parameter("verbose"):
+            gdb.write("Registering global %s pretty-printer ...\n" % name)
+        obj = gdb
+    else:
+        if gdb.parameter("verbose"):
+            gdb.write("Registering %s pretty-printer for %s ...\n" %
+                      (name, obj.filename))
+
+ if hasattr(printer, "name"):
+ if not isinstance(printer.name, basestring):
+ raise TypeError("printer name is not a string")
+ # If printer provides a name, make sure it doesn't contain ";".
+ # Semicolon is used by the info/enable/disable pretty-printer commands
+ # to delimit subprinters.
+ if printer.name.find(";") >= 0:
+ raise ValueError("semicolon ';' in printer name")
+ # Also make sure the name is unique.
+ # Alas, we can't do the same for functions and __name__, they could
+ # all have a canonical name like "lookup_function".
+ # PERF: gdb records printers in a list, making this inefficient.
+ i = 0
+ for p in obj.pretty_printers:
+ if hasattr(p, "name") and p.name == printer.name:
+ if replace:
+ del obj.pretty_printers[i]
+ break
+ else:
+ raise RuntimeError("pretty-printer already registered: %s" %
+ printer.name)
+ i = i + 1
+
+ obj.pretty_printers.insert(0, printer)
+
+
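+# Illustrative sketch (not part of the original module): the minimal
+# contract register_pretty_printer() checks for is an object with
+# `name', `enabled' and `__call__'.  The Point/PointPrinter names below
+# are hypothetical; a user script could register such a printer with:
+#
+#   import gdb.printing
+#   import gdb.types
+#
+#   class PointPrinter(object):
+#       "Print a hypothetical struct Point as (x, y)."
+#       def __init__(self, val):
+#           self.val = val
+#       def to_string(self):
+#           return "(%s, %s)" % (self.val['x'], self.val['y'])
+#
+#   class PointLookup(gdb.printing.PrettyPrinter):
+#       def __init__(self):
+#           super(PointLookup, self).__init__("Point")
+#       def __call__(self, val):
+#           if gdb.types.get_basic_type(val.type).tag == "Point":
+#               return PointPrinter(val)
+#           return None
+#
+#   gdb.printing.register_pretty_printer(None, PointLookup())
+
+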
+class RegexpCollectionPrettyPrinter(PrettyPrinter):
+ """Class for implementing a collection of regular-expression based pretty-printers.
+
+ Intended usage:
+
+ pretty_printer = RegexpCollectionPrettyPrinter("my_library")
+ pretty_printer.add_printer("myclass1", "^myclass1$", MyClass1Printer)
+ ...
+ pretty_printer.add_printer("myclassN", "^myclassN$", MyClassNPrinter)
+ register_pretty_printer(obj, pretty_printer)
+ """
+
+ class RegexpSubprinter(SubPrettyPrinter):
+ def __init__(self, name, regexp, gen_printer):
+ super(RegexpCollectionPrettyPrinter.RegexpSubprinter, self).__init__(name)
+ self.regexp = regexp
+ self.gen_printer = gen_printer
+ self.compiled_re = re.compile(regexp)
+
+ def __init__(self, name):
+ super(RegexpCollectionPrettyPrinter, self).__init__(name, [])
+
+ def add_printer(self, name, regexp, gen_printer):
+ """Add a printer to the list.
+
+ The printer is added to the end of the list.
+
+ Arguments:
+ name: The name of the subprinter.
+ regexp: The regular expression, as a string.
+ gen_printer: A function/method that given a value returns an
+ object to pretty-print it.
+
+ Returns:
+ Nothing.
+ """
+
+ # NOTE: A previous version made the name of each printer the regexp.
+ # That makes it awkward to pass to the enable/disable commands (it's
+ # cumbersome to make a regexp of a regexp). So now the name is a
+ # separate parameter.
+
+ self.subprinters.append(self.RegexpSubprinter(name, regexp,
+ gen_printer))
+
+ def __call__(self, val):
+ """Lookup the pretty-printer for the provided value."""
+
+ # Get the type name.
+ typename = gdb.types.get_basic_type(val.type).tag
+ if not typename:
+ return None
+
+ # Iterate over table of type regexps to determine
+ # if a printer is registered for that type.
+ # Return an instantiation of the printer if found.
+ for printer in self.subprinters:
+ if printer.enabled and printer.compiled_re.search(typename):
+ return printer.gen_printer(val)
+
+ # Cannot find a pretty printer. Return None.
+ return None
+
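+# Illustrative sketch (not part of the original module): the gen_printer
+# passed to add_printer() is called with a gdb.Value and must return an
+# object GDB understands, i.e. one with to_string() and/or children().
+# MyString and its `data'/`len' fields are hypothetical; a user script
+# might wire it up as:
+#
+#   class MyStringPrinter(object):
+#       def __init__(self, val):
+#           self.val = val
+#       def to_string(self):
+#           return self.val['data'].string(length=int(self.val['len']))
+#       def display_hint(self):
+#           return 'string'
+#
+#   pp = gdb.printing.RegexpCollectionPrettyPrinter("my_library")
+#   pp.add_printer('MyString', '^MyString$', MyStringPrinter)
+#   gdb.printing.register_pretty_printer(None, pp)
+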
+# A helper class for printing enum types. This class is instantiated
+# with a list of enumerators to print a particular Value.
+class _EnumInstance:
+ def __init__(self, enumerators, val):
+ self.enumerators = enumerators
+ self.val = val
+
+ def to_string(self):
+ flag_list = []
+ v = long(self.val)
+ any_found = False
+ for (e_name, e_value) in self.enumerators:
+ if v & e_value != 0:
+ flag_list.append(e_name)
+ v = v & ~e_value
+ any_found = True
+ if not any_found or v != 0:
+ # Leftover value.
+ flag_list.append('<unknown: 0x%x>' % v)
+        return "0x%x [%s]" % (long(self.val), " | ".join(flag_list))
+
+class FlagEnumerationPrinter(PrettyPrinter):
+ """A pretty-printer which can be used to print a flag-style enumeration.
+ A flag-style enumeration is one where the enumerators are or'd
+ together to create values. The new printer will print these
+ symbolically using '|' notation. The printer must be registered
+ manually. This printer is most useful when an enum is flag-like,
+ but has some overlap. GDB's built-in printing will not handle
+ this case, but this printer will attempt to."""
+
+ def __init__(self, enum_type):
+ super(FlagEnumerationPrinter, self).__init__(enum_type)
+ self.initialized = False
+
+ def __call__(self, val):
+ if not self.initialized:
+ self.initialized = True
+ flags = gdb.lookup_type(self.name)
+ self.enumerators = []
+ for field in flags.fields():
+ self.enumerators.append((field.name, field.enumval))
+ # Sorting the enumerators by value usually does the right
+ # thing.
+            self.enumerators.sort(key = lambda x: x[1])
+
+ if self.enabled:
+ return _EnumInstance(self.enumerators, val)
+ else:
+ return None
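+
+# Illustrative sketch (not part of the original module): registering a
+# FlagEnumerationPrinter for a hypothetical C type `enum flags'.  The
+# string passed to the constructor must be resolvable by gdb.lookup_type:
+#
+#   gdb.printing.register_pretty_printer(
+#       None, gdb.printing.FlagEnumerationPrinter('enum flags'))
+#
+# A value such as (enum flags) 5 would then print as
+# "0x5 [FLAG_A | FLAG_C]", assuming FLAG_A == 1 and FLAG_C == 4.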
diff --git a/share/gdb/python/gdb/prompt.py b/share/gdb/python/gdb/prompt.py
new file mode 100644
index 0000000..bb1975b
--- /dev/null
+++ b/share/gdb/python/gdb/prompt.py
@@ -0,0 +1,148 @@
+# Extended prompt utilities.
+# Copyright (C) 2011-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+""" Extended prompt library functions."""
+
+import gdb
+import os
+
+def _prompt_pwd(ignore):
+    "The current working directory."
+    # os.getcwdu exists only on Python 2; fall back to os.getcwd elsewhere.
+    return os.getcwdu() if hasattr(os, 'getcwdu') else os.getcwd()
+
+def _prompt_object_attr(func, what, attr, nattr):
+ """Internal worker for fetching GDB attributes."""
+ if attr is None:
+ attr = nattr
+ try:
+ obj = func()
+ except gdb.error:
+ return '<no %s>' % what
+ if hasattr(obj, attr):
+ result = getattr(obj, attr)
+ if callable(result):
+ result = result()
+ return result
+ else:
+ return '<no attribute %s on current %s>' % (attr, what)
+
+def _prompt_frame(attr):
+ "The selected frame; an argument names a frame parameter."
+ return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
+
+def _prompt_thread(attr):
+ "The selected thread; an argument names a thread parameter."
+ return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
+
+def _prompt_version(attr):
+ "The version of GDB."
+ return gdb.VERSION
+
+def _prompt_esc(attr):
+ "The ESC character."
+ return '\033'
+
+def _prompt_bs(attr):
+ "A backslash."
+ return '\\'
+
+def _prompt_n(attr):
+ "A newline."
+ return '\n'
+
+def _prompt_r(attr):
+ "A carriage return."
+ return '\r'
+
+def _prompt_param(attr):
+ "A parameter's value; the argument names the parameter."
+ return gdb.parameter(attr)
+
+def _prompt_noprint_begin(attr):
+ "Begins a sequence of non-printing characters."
+ return '\001'
+
+def _prompt_noprint_end(attr):
+ "Ends a sequence of non-printing characters."
+ return '\002'
+
+prompt_substitutions = {
+ 'e': _prompt_esc,
+ '\\': _prompt_bs,
+ 'n': _prompt_n,
+ 'r': _prompt_r,
+ 'v': _prompt_version,
+ 'w': _prompt_pwd,
+ 'f': _prompt_frame,
+ 't': _prompt_thread,
+ 'p': _prompt_param,
+ '[': _prompt_noprint_begin,
+ ']': _prompt_noprint_end
+}
+
+def prompt_help():
+ """Generate help dynamically from the __doc__ strings of attribute
+ functions."""
+
+ result = ''
+ keys = sorted (prompt_substitutions.keys())
+ for key in keys:
+ result += ' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
+ result += """
+A substitution can be used in a simple form, like "\\f".
+An argument can also be passed to it, like "\\f{name}".
+The meaning of the argument depends on the particular substitution."""
+ return result
+
+def substitute_prompt(prompt):
+ "Perform substitutions on PROMPT."
+
+ result = ''
+ plen = len(prompt)
+ i = 0
+ while i < plen:
+ if prompt[i] == '\\':
+ i = i + 1
+ if i >= plen:
+ break
+ cmdch = prompt[i]
+
+ if cmdch in prompt_substitutions:
+ cmd = prompt_substitutions[cmdch]
+
+ if i + 1 < plen and prompt[i + 1] == '{':
+ j = i + 1
+ while j < plen and prompt[j] != '}':
+ j = j + 1
+ # Just ignore formatting errors.
+ if j >= plen or prompt[j] != '}':
+ arg = None
+ else:
+ arg = prompt[i + 2 : j]
+ i = j
+ else:
+ arg = None
+ result += str(cmd(arg))
+ else:
+ # Unrecognized escapes are turned into the escaped
+ # character itself.
+ result += prompt[i]
+ else:
+ result += prompt[i]
+
+ i = i + 1
+
+ return result
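+
+# Illustrative sketch (not part of the original module): the table above
+# backs GDB's `set extended-prompt' command, e.g.
+#
+#   (gdb) set extended-prompt \w \v>
+#
+# and the substitution can also be exercised directly from Python:
+#
+#   print(substitute_prompt("gdb \\v \\f{name}> "))
+#
+# which expands \v to the GDB version string and \f{name} to the name of
+# the currently selected frame (or a "<no frame>" message if none).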
diff --git a/share/gdb/python/gdb/types.py b/share/gdb/python/gdb/types.py
new file mode 100644
index 0000000..ffc817c
--- /dev/null
+++ b/share/gdb/python/gdb/types.py
@@ -0,0 +1,176 @@
+# Type utilities.
+# Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Utilities for working with gdb.Types."""
+
+import gdb
+
+
+def get_basic_type(type_):
+ """Return the "basic" type of a type.
+
+ Arguments:
+ type_: The type to reduce to its basic type.
+
+ Returns:
+        type_ with const/volatile stripped away,
+        and typedefs/references converted to the underlying type.
+ """
+
+ while (type_.code == gdb.TYPE_CODE_REF or
+ type_.code == gdb.TYPE_CODE_TYPEDEF):
+ if type_.code == gdb.TYPE_CODE_REF:
+ type_ = type_.target()
+ else:
+ type_ = type_.strip_typedefs()
+ return type_.unqualified()
+
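+# Illustrative sketch (not part of the original module): for a
+# hypothetical C++ declaration `typedef const Foo &FooRef;', the layers
+# are peeled back to the underlying struct:
+#
+#   ref_type = gdb.lookup_type('FooRef')
+#   gdb.types.get_basic_type(ref_type).tag   # -> 'Foo'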
+
+def has_field(type_, field):
+ """Return True if a type has the specified field.
+
+ Arguments:
+ type_: The type to examine.
+ It must be one of gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION.
+ field: The name of the field to look up.
+
+ Returns:
+ True if the field is present either in type_ or any baseclass.
+
+ Raises:
+ TypeError: The type is not a struct or union.
+ """
+
+ type_ = get_basic_type(type_)
+ if (type_.code != gdb.TYPE_CODE_STRUCT and
+ type_.code != gdb.TYPE_CODE_UNION):
+ raise TypeError("not a struct or union")
+ for f in type_.fields():
+ if f.is_base_class:
+ if has_field(f.type, field):
+ return True
+ else:
+ # NOTE: f.name could be None
+ if f.name == field:
+ return True
+ return False
+
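+# Illustrative sketch (not part of the original module): with a
+# hypothetical `struct node { struct node *next; int payload; };',
+#
+#   gdb.types.has_field(gdb.lookup_type('struct node'), 'next')   # -> True
+#   gdb.types.has_field(gdb.lookup_type('struct node'), 'prev')   # -> False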
+
+def make_enum_dict(enum_type):
+ """Return a dictionary from a program's enum type.
+
+ Arguments:
+ enum_type: The enum to compute the dictionary for.
+
+ Returns:
+ The dictionary of the enum.
+
+ Raises:
+ TypeError: The type is not an enum.
+ """
+
+ if enum_type.code != gdb.TYPE_CODE_ENUM:
+ raise TypeError("not an enum type")
+ enum_dict = {}
+ for field in enum_type.fields():
+ # The enum's value is stored in "enumval".
+ enum_dict[field.name] = field.enumval
+ return enum_dict
+
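+# Illustrative sketch (not part of the original module): for a program
+# containing a hypothetical `enum color { RED, GREEN = 5 };',
+#
+#   gdb.types.make_enum_dict(gdb.lookup_type('enum color'))
+#   # -> {'RED': 0, 'GREEN': 5}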
+
+def deep_items (type_):
+ """Return an iterator that recursively traverses anonymous fields.
+
+ Arguments:
+ type_: The type to traverse. It should be one of
+ gdb.TYPE_CODE_STRUCT or gdb.TYPE_CODE_UNION.
+
+ Returns:
+        an iterator similar to gdb.Type.iteritems(), i.e., it returns
+        pairs of key, value, but any anonymous struct or union field
+        is traversed recursively, depth-first.
+ """
+ for k, v in type_.iteritems ():
+ if k:
+ yield k, v
+ else:
+ for i in deep_items (v.type):
+ yield i
+
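+# Illustrative sketch (not part of the original module): given a
+# hypothetical `struct S { int tag; union { int i; char c; }; };',
+# deep_items() also yields the members of the anonymous union:
+#
+#   [name for name, field in gdb.types.deep_items(gdb.lookup_type('struct S'))]
+#   # -> ['tag', 'i', 'c']
+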
+class TypePrinter(object):
+ """The base class for type printers.
+
+ Instances of this type can be used to substitute type names during
+ 'ptype'.
+
+ A type printer must have at least 'name' and 'enabled' attributes,
+ and supply an 'instantiate' method.
+
+ The 'instantiate' method must either return None, or return an
+ object which has a 'recognize' method. This method must accept a
+ gdb.Type argument and either return None, meaning that the type
+ was not recognized, or a string naming the type.
+ """
+
+ def __init__(self, name):
+ self.name = name
+ self.enabled = True
+
+ def instantiate(self):
+ return None
+
+# Helper function for computing the list of type recognizers.
+def _get_some_type_recognizers(result, plist):
+ for printer in plist:
+ if printer.enabled:
+ inst = printer.instantiate()
+ if inst is not None:
+ result.append(inst)
+ return None
+
+def get_type_recognizers():
+ "Return a list of the enabled type recognizers for the current context."
+ result = []
+
+ # First try the objfiles.
+ for objfile in gdb.objfiles():
+ _get_some_type_recognizers(result, objfile.type_printers)
+ # Now try the program space.
+ _get_some_type_recognizers(result, gdb.current_progspace().type_printers)
+ # Finally, globals.
+ _get_some_type_recognizers(result, gdb.type_printers)
+
+ return result
+
+def apply_type_recognizers(recognizers, type_obj):
+ """Apply the given list of type recognizers to the type TYPE_OBJ.
+ If any recognizer in the list recognizes TYPE_OBJ, returns the name
+ given by the recognizer. Otherwise, this returns None."""
+ for r in recognizers:
+ result = r.recognize(type_obj)
+ if result is not None:
+ return result
+ return None
+
+def register_type_printer(locus, printer):
+ """Register a type printer.
+ PRINTER is the type printer instance.
+ LOCUS is either an objfile, a program space, or None, indicating
+ global registration."""
+
+ if locus is None:
+ locus = gdb
+ locus.type_printers.insert(0, printer)
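+
+# Illustrative sketch (not part of the original module): a minimal type
+# printer that shows a hypothetical tagged type `MyString' under the
+# alias `my::string' in `ptype'/`whatis' output.  All names are invented:
+#
+#   class MyStringRecognizer(object):
+#       def recognize(self, type_obj):
+#           if type_obj.tag == 'MyString':
+#               return 'my::string'
+#           return None
+#
+#   class MyStringTypePrinter(gdb.types.TypePrinter):
+#       def __init__(self):
+#           super(MyStringTypePrinter, self).__init__('my::string')
+#       def instantiate(self):
+#           return MyStringRecognizer()
+#
+#   gdb.types.register_type_printer(None, MyStringTypePrinter())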
diff --git a/share/gdb/syscalls/amd64-linux.xml b/share/gdb/syscalls/amd64-linux.xml
new file mode 100644
index 0000000..bf3da5d
--- /dev/null
+++ b/share/gdb/syscalls/amd64-linux.xml
@@ -0,0 +1,314 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2009-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/x86/include/asm/unistd_64.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="read" number="0"/>
+ <syscall name="write" number="1"/>
+ <syscall name="open" number="2"/>
+ <syscall name="close" number="3"/>
+ <syscall name="stat" number="4"/>
+ <syscall name="fstat" number="5"/>
+ <syscall name="lstat" number="6"/>
+ <syscall name="poll" number="7"/>
+ <syscall name="lseek" number="8"/>
+ <syscall name="mmap" number="9"/>
+ <syscall name="mprotect" number="10"/>
+ <syscall name="munmap" number="11"/>
+ <syscall name="brk" number="12"/>
+ <syscall name="rt_sigaction" number="13"/>
+ <syscall name="rt_sigprocmask" number="14"/>
+ <syscall name="rt_sigreturn" number="15"/>
+ <syscall name="ioctl" number="16"/>
+ <syscall name="pread64" number="17"/>
+ <syscall name="pwrite64" number="18"/>
+ <syscall name="readv" number="19"/>
+ <syscall name="writev" number="20"/>
+ <syscall name="access" number="21"/>
+ <syscall name="pipe" number="22"/>
+ <syscall name="select" number="23"/>
+ <syscall name="sched_yield" number="24"/>
+ <syscall name="mremap" number="25"/>
+ <syscall name="msync" number="26"/>
+ <syscall name="mincore" number="27"/>
+ <syscall name="madvise" number="28"/>
+ <syscall name="shmget" number="29"/>
+ <syscall name="shmat" number="30"/>
+ <syscall name="shmctl" number="31"/>
+ <syscall name="dup" number="32"/>
+ <syscall name="dup2" number="33"/>
+ <syscall name="pause" number="34"/>
+ <syscall name="nanosleep" number="35"/>
+ <syscall name="getitimer" number="36"/>
+ <syscall name="alarm" number="37"/>
+ <syscall name="setitimer" number="38"/>
+ <syscall name="getpid" number="39"/>
+ <syscall name="sendfile" number="40"/>
+ <syscall name="socket" number="41"/>
+ <syscall name="connect" number="42"/>
+ <syscall name="accept" number="43"/>
+ <syscall name="sendto" number="44"/>
+ <syscall name="recvfrom" number="45"/>
+ <syscall name="sendmsg" number="46"/>
+ <syscall name="recvmsg" number="47"/>
+ <syscall name="shutdown" number="48"/>
+ <syscall name="bind" number="49"/>
+ <syscall name="listen" number="50"/>
+ <syscall name="getsockname" number="51"/>
+ <syscall name="getpeername" number="52"/>
+ <syscall name="socketpair" number="53"/>
+ <syscall name="setsockopt" number="54"/>
+ <syscall name="getsockopt" number="55"/>
+ <syscall name="clone" number="56"/>
+ <syscall name="fork" number="57"/>
+ <syscall name="vfork" number="58"/>
+ <syscall name="execve" number="59"/>
+ <syscall name="exit" number="60"/>
+ <syscall name="wait4" number="61"/>
+ <syscall name="kill" number="62"/>
+ <syscall name="uname" number="63"/>
+ <syscall name="semget" number="64"/>
+ <syscall name="semop" number="65"/>
+ <syscall name="semctl" number="66"/>
+ <syscall name="shmdt" number="67"/>
+ <syscall name="msgget" number="68"/>
+ <syscall name="msgsnd" number="69"/>
+ <syscall name="msgrcv" number="70"/>
+ <syscall name="msgctl" number="71"/>
+ <syscall name="fcntl" number="72"/>
+ <syscall name="flock" number="73"/>
+ <syscall name="fsync" number="74"/>
+ <syscall name="fdatasync" number="75"/>
+ <syscall name="truncate" number="76"/>
+ <syscall name="ftruncate" number="77"/>
+ <syscall name="getdents" number="78"/>
+ <syscall name="getcwd" number="79"/>
+ <syscall name="chdir" number="80"/>
+ <syscall name="fchdir" number="81"/>
+ <syscall name="rename" number="82"/>
+ <syscall name="mkdir" number="83"/>
+ <syscall name="rmdir" number="84"/>
+ <syscall name="creat" number="85"/>
+ <syscall name="link" number="86"/>
+ <syscall name="unlink" number="87"/>
+ <syscall name="symlink" number="88"/>
+ <syscall name="readlink" number="89"/>
+ <syscall name="chmod" number="90"/>
+ <syscall name="fchmod" number="91"/>
+ <syscall name="chown" number="92"/>
+ <syscall name="fchown" number="93"/>
+ <syscall name="lchown" number="94"/>
+ <syscall name="umask" number="95"/>
+ <syscall name="gettimeofday" number="96"/>
+ <syscall name="getrlimit" number="97"/>
+ <syscall name="getrusage" number="98"/>
+ <syscall name="sysinfo" number="99"/>
+ <syscall name="times" number="100"/>
+ <syscall name="ptrace" number="101"/>
+ <syscall name="getuid" number="102"/>
+ <syscall name="syslog" number="103"/>
+ <syscall name="getgid" number="104"/>
+ <syscall name="setuid" number="105"/>
+ <syscall name="setgid" number="106"/>
+ <syscall name="geteuid" number="107"/>
+ <syscall name="getegid" number="108"/>
+ <syscall name="setpgid" number="109"/>
+ <syscall name="getppid" number="110"/>
+ <syscall name="getpgrp" number="111"/>
+ <syscall name="setsid" number="112"/>
+ <syscall name="setreuid" number="113"/>
+ <syscall name="setregid" number="114"/>
+ <syscall name="getgroups" number="115"/>
+ <syscall name="setgroups" number="116"/>
+ <syscall name="setresuid" number="117"/>
+ <syscall name="getresuid" number="118"/>
+ <syscall name="setresgid" number="119"/>
+ <syscall name="getresgid" number="120"/>
+ <syscall name="getpgid" number="121"/>
+ <syscall name="setfsuid" number="122"/>
+ <syscall name="setfsgid" number="123"/>
+ <syscall name="getsid" number="124"/>
+ <syscall name="capget" number="125"/>
+ <syscall name="capset" number="126"/>
+ <syscall name="rt_sigpending" number="127"/>
+ <syscall name="rt_sigtimedwait" number="128"/>
+ <syscall name="rt_sigqueueinfo" number="129"/>
+ <syscall name="rt_sigsuspend" number="130"/>
+ <syscall name="sigaltstack" number="131"/>
+ <syscall name="utime" number="132"/>
+ <syscall name="mknod" number="133"/>
+ <syscall name="uselib" number="134"/>
+ <syscall name="personality" number="135"/>
+ <syscall name="ustat" number="136"/>
+ <syscall name="statfs" number="137"/>
+ <syscall name="fstatfs" number="138"/>
+ <syscall name="sysfs" number="139"/>
+ <syscall name="getpriority" number="140"/>
+ <syscall name="setpriority" number="141"/>
+ <syscall name="sched_setparam" number="142"/>
+ <syscall name="sched_getparam" number="143"/>
+ <syscall name="sched_setscheduler" number="144"/>
+ <syscall name="sched_getscheduler" number="145"/>
+ <syscall name="sched_get_priority_max" number="146"/>
+ <syscall name="sched_get_priority_min" number="147"/>
+ <syscall name="sched_rr_get_interval" number="148"/>
+ <syscall name="mlock" number="149"/>
+ <syscall name="munlock" number="150"/>
+ <syscall name="mlockall" number="151"/>
+ <syscall name="munlockall" number="152"/>
+ <syscall name="vhangup" number="153"/>
+ <syscall name="modify_ldt" number="154"/>
+ <syscall name="pivot_root" number="155"/>
+ <syscall name="_sysctl" number="156"/>
+ <syscall name="prctl" number="157"/>
+ <syscall name="arch_prctl" number="158"/>
+ <syscall name="adjtimex" number="159"/>
+ <syscall name="setrlimit" number="160"/>
+ <syscall name="chroot" number="161"/>
+ <syscall name="sync" number="162"/>
+ <syscall name="acct" number="163"/>
+ <syscall name="settimeofday" number="164"/>
+ <syscall name="mount" number="165"/>
+ <syscall name="umount2" number="166"/>
+ <syscall name="swapon" number="167"/>
+ <syscall name="swapoff" number="168"/>
+ <syscall name="reboot" number="169"/>
+ <syscall name="sethostname" number="170"/>
+ <syscall name="setdomainname" number="171"/>
+ <syscall name="iopl" number="172"/>
+ <syscall name="ioperm" number="173"/>
+ <syscall name="create_module" number="174"/>
+ <syscall name="init_module" number="175"/>
+ <syscall name="delete_module" number="176"/>
+ <syscall name="get_kernel_syms" number="177"/>
+ <syscall name="query_module" number="178"/>
+ <syscall name="quotactl" number="179"/>
+ <syscall name="nfsservctl" number="180"/>
+ <syscall name="getpmsg" number="181"/>
+ <syscall name="putpmsg" number="182"/>
+ <syscall name="afs_syscall" number="183"/>
+ <syscall name="tuxcall" number="184"/>
+ <syscall name="security" number="185"/>
+ <syscall name="gettid" number="186"/>
+ <syscall name="readahead" number="187"/>
+ <syscall name="setxattr" number="188"/>
+ <syscall name="lsetxattr" number="189"/>
+ <syscall name="fsetxattr" number="190"/>
+ <syscall name="getxattr" number="191"/>
+ <syscall name="lgetxattr" number="192"/>
+ <syscall name="fgetxattr" number="193"/>
+ <syscall name="listxattr" number="194"/>
+ <syscall name="llistxattr" number="195"/>
+ <syscall name="flistxattr" number="196"/>
+ <syscall name="removexattr" number="197"/>
+ <syscall name="lremovexattr" number="198"/>
+ <syscall name="fremovexattr" number="199"/>
+ <syscall name="tkill" number="200"/>
+ <syscall name="time" number="201"/>
+ <syscall name="futex" number="202"/>
+ <syscall name="sched_setaffinity" number="203"/>
+ <syscall name="sched_getaffinity" number="204"/>
+ <syscall name="set_thread_area" number="205"/>
+ <syscall name="io_setup" number="206"/>
+ <syscall name="io_destroy" number="207"/>
+ <syscall name="io_getevents" number="208"/>
+ <syscall name="io_submit" number="209"/>
+ <syscall name="io_cancel" number="210"/>
+ <syscall name="get_thread_area" number="211"/>
+ <syscall name="lookup_dcookie" number="212"/>
+ <syscall name="epoll_create" number="213"/>
+ <syscall name="epoll_ctl_old" number="214"/>
+ <syscall name="epoll_wait_old" number="215"/>
+ <syscall name="remap_file_pages" number="216"/>
+ <syscall name="getdents64" number="217"/>
+ <syscall name="set_tid_address" number="218"/>
+ <syscall name="restart_syscall" number="219"/>
+ <syscall name="semtimedop" number="220"/>
+ <syscall name="fadvise64" number="221"/>
+ <syscall name="timer_create" number="222"/>
+ <syscall name="timer_settime" number="223"/>
+ <syscall name="timer_gettime" number="224"/>
+ <syscall name="timer_getoverrun" number="225"/>
+ <syscall name="timer_delete" number="226"/>
+ <syscall name="clock_settime" number="227"/>
+ <syscall name="clock_gettime" number="228"/>
+ <syscall name="clock_getres" number="229"/>
+ <syscall name="clock_nanosleep" number="230"/>
+ <syscall name="exit_group" number="231"/>
+ <syscall name="epoll_wait" number="232"/>
+ <syscall name="epoll_ctl" number="233"/>
+ <syscall name="tgkill" number="234"/>
+ <syscall name="utimes" number="235"/>
+ <syscall name="vserver" number="236"/>
+ <syscall name="mbind" number="237"/>
+ <syscall name="set_mempolicy" number="238"/>
+ <syscall name="get_mempolicy" number="239"/>
+ <syscall name="mq_open" number="240"/>
+ <syscall name="mq_unlink" number="241"/>
+ <syscall name="mq_timedsend" number="242"/>
+ <syscall name="mq_timedreceive" number="243"/>
+ <syscall name="mq_notify" number="244"/>
+ <syscall name="mq_getsetattr" number="245"/>
+ <syscall name="kexec_load" number="246"/>
+ <syscall name="waitid" number="247"/>
+ <syscall name="add_key" number="248"/>
+ <syscall name="request_key" number="249"/>
+ <syscall name="keyctl" number="250"/>
+ <syscall name="ioprio_set" number="251"/>
+ <syscall name="ioprio_get" number="252"/>
+ <syscall name="inotify_init" number="253"/>
+ <syscall name="inotify_add_watch" number="254"/>
+ <syscall name="inotify_rm_watch" number="255"/>
+ <syscall name="migrate_pages" number="256"/>
+ <syscall name="openat" number="257"/>
+ <syscall name="mkdirat" number="258"/>
+ <syscall name="mknodat" number="259"/>
+ <syscall name="fchownat" number="260"/>
+ <syscall name="futimesat" number="261"/>
+ <syscall name="newfstatat" number="262"/>
+ <syscall name="unlinkat" number="263"/>
+ <syscall name="renameat" number="264"/>
+ <syscall name="linkat" number="265"/>
+ <syscall name="symlinkat" number="266"/>
+ <syscall name="readlinkat" number="267"/>
+ <syscall name="fchmodat" number="268"/>
+ <syscall name="faccessat" number="269"/>
+ <syscall name="pselect6" number="270"/>
+ <syscall name="ppoll" number="271"/>
+ <syscall name="unshare" number="272"/>
+ <syscall name="set_robust_list" number="273"/>
+ <syscall name="get_robust_list" number="274"/>
+ <syscall name="splice" number="275"/>
+ <syscall name="tee" number="276"/>
+ <syscall name="sync_file_range" number="277"/>
+ <syscall name="vmsplice" number="278"/>
+ <syscall name="move_pages" number="279"/>
+ <syscall name="utimensat" number="280"/>
+ <syscall name="epoll_pwait" number="281"/>
+ <syscall name="signalfd" number="282"/>
+ <syscall name="timerfd_create" number="283"/>
+ <syscall name="eventfd" number="284"/>
+ <syscall name="fallocate" number="285"/>
+ <syscall name="timerfd_settime" number="286"/>
+ <syscall name="timerfd_gettime" number="287"/>
+ <syscall name="accept4" number="288"/>
+ <syscall name="signalfd4" number="289"/>
+ <syscall name="eventfd2" number="290"/>
+ <syscall name="epoll_create1" number="291"/>
+ <syscall name="dup3" number="292"/>
+ <syscall name="pipe2" number="293"/>
+ <syscall name="inotify_init1" number="294"/>
+ <syscall name="preadv" number="295"/>
+ <syscall name="pwritev" number="296"/>
+</syscalls_info>
diff --git a/share/gdb/syscalls/gdb-syscalls.dtd b/share/gdb/syscalls/gdb-syscalls.dtd
new file mode 100644
index 0000000..05c1ccf
--- /dev/null
+++ b/share/gdb/syscalls/gdb-syscalls.dtd
@@ -0,0 +1,14 @@
+<!-- Copyright (C) 2009-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!-- The root element of a syscall info is <syscalls_info>. -->
+
+<!ELEMENT syscalls_info (syscall*)>
+
+<!ELEMENT syscall EMPTY>
+<!ATTLIST syscall
+ name CDATA #REQUIRED
+ number CDATA #REQUIRED>
diff --git a/share/gdb/syscalls/i386-linux.xml b/share/gdb/syscalls/i386-linux.xml
new file mode 100644
index 0000000..80512d8
--- /dev/null
+++ b/share/gdb/syscalls/i386-linux.xml
@@ -0,0 +1,340 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2009-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/x86/include/asm/unistd_32.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="restart_syscall" number="0"/>
+ <syscall name="exit" number="1"/>
+ <syscall name="fork" number="2"/>
+ <syscall name="read" number="3"/>
+ <syscall name="write" number="4"/>
+ <syscall name="open" number="5"/>
+ <syscall name="close" number="6"/>
+ <syscall name="waitpid" number="7"/>
+ <syscall name="creat" number="8"/>
+ <syscall name="link" number="9"/>
+ <syscall name="unlink" number="10"/>
+ <syscall name="execve" number="11"/>
+ <syscall name="chdir" number="12"/>
+ <syscall name="time" number="13"/>
+ <syscall name="mknod" number="14"/>
+ <syscall name="chmod" number="15"/>
+ <syscall name="lchown" number="16"/>
+ <syscall name="break" number="17"/>
+ <syscall name="oldstat" number="18"/>
+ <syscall name="lseek" number="19"/>
+ <syscall name="getpid" number="20"/>
+ <syscall name="mount" number="21"/>
+ <syscall name="umount" number="22"/>
+ <syscall name="setuid" number="23"/>
+ <syscall name="getuid" number="24"/>
+ <syscall name="stime" number="25"/>
+ <syscall name="ptrace" number="26"/>
+ <syscall name="alarm" number="27"/>
+ <syscall name="oldfstat" number="28"/>
+ <syscall name="pause" number="29"/>
+ <syscall name="utime" number="30"/>
+ <syscall name="stty" number="31"/>
+ <syscall name="gtty" number="32"/>
+ <syscall name="access" number="33"/>
+ <syscall name="nice" number="34"/>
+ <syscall name="ftime" number="35"/>
+ <syscall name="sync" number="36"/>
+ <syscall name="kill" number="37"/>
+ <syscall name="rename" number="38"/>
+ <syscall name="mkdir" number="39"/>
+ <syscall name="rmdir" number="40"/>
+ <syscall name="dup" number="41"/>
+ <syscall name="pipe" number="42"/>
+ <syscall name="times" number="43"/>
+ <syscall name="prof" number="44"/>
+ <syscall name="brk" number="45"/>
+ <syscall name="setgid" number="46"/>
+ <syscall name="getgid" number="47"/>
+ <syscall name="signal" number="48"/>
+ <syscall name="geteuid" number="49"/>
+ <syscall name="getegid" number="50"/>
+ <syscall name="acct" number="51"/>
+ <syscall name="umount2" number="52"/>
+ <syscall name="lock" number="53"/>
+ <syscall name="ioctl" number="54"/>
+ <syscall name="fcntl" number="55"/>
+ <syscall name="mpx" number="56"/>
+ <syscall name="setpgid" number="57"/>
+ <syscall name="ulimit" number="58"/>
+ <syscall name="oldolduname" number="59"/>
+ <syscall name="umask" number="60"/>
+ <syscall name="chroot" number="61"/>
+ <syscall name="ustat" number="62"/>
+ <syscall name="dup2" number="63"/>
+ <syscall name="getppid" number="64"/>
+ <syscall name="getpgrp" number="65"/>
+ <syscall name="setsid" number="66"/>
+ <syscall name="sigaction" number="67"/>
+ <syscall name="sgetmask" number="68"/>
+ <syscall name="ssetmask" number="69"/>
+ <syscall name="setreuid" number="70"/>
+ <syscall name="setregid" number="71"/>
+ <syscall name="sigsuspend" number="72"/>
+ <syscall name="sigpending" number="73"/>
+ <syscall name="sethostname" number="74"/>
+ <syscall name="setrlimit" number="75"/>
+ <syscall name="getrlimit" number="76"/>
+ <syscall name="getrusage" number="77"/>
+ <syscall name="gettimeofday" number="78"/>
+ <syscall name="settimeofday" number="79"/>
+ <syscall name="getgroups" number="80"/>
+ <syscall name="setgroups" number="81"/>
+ <syscall name="select" number="82"/>
+ <syscall name="symlink" number="83"/>
+ <syscall name="oldlstat" number="84"/>
+ <syscall name="readlink" number="85"/>
+ <syscall name="uselib" number="86"/>
+ <syscall name="swapon" number="87"/>
+ <syscall name="reboot" number="88"/>
+ <syscall name="readdir" number="89"/>
+ <syscall name="mmap" number="90"/>
+ <syscall name="munmap" number="91"/>
+ <syscall name="truncate" number="92"/>
+ <syscall name="ftruncate" number="93"/>
+ <syscall name="fchmod" number="94"/>
+ <syscall name="fchown" number="95"/>
+ <syscall name="getpriority" number="96"/>
+ <syscall name="setpriority" number="97"/>
+ <syscall name="profil" number="98"/>
+ <syscall name="statfs" number="99"/>
+ <syscall name="fstatfs" number="100"/>
+ <syscall name="ioperm" number="101"/>
+ <syscall name="socketcall" number="102"/>
+ <syscall name="syslog" number="103"/>
+ <syscall name="setitimer" number="104"/>
+ <syscall name="getitimer" number="105"/>
+ <syscall name="stat" number="106"/>
+ <syscall name="lstat" number="107"/>
+ <syscall name="fstat" number="108"/>
+ <syscall name="olduname" number="109"/>
+ <syscall name="iopl" number="110"/>
+ <syscall name="vhangup" number="111"/>
+ <syscall name="idle" number="112"/>
+ <syscall name="vm86old" number="113"/>
+ <syscall name="wait4" number="114"/>
+ <syscall name="swapoff" number="115"/>
+ <syscall name="sysinfo" number="116"/>
+ <syscall name="ipc" number="117"/>
+ <syscall name="fsync" number="118"/>
+ <syscall name="sigreturn" number="119"/>
+ <syscall name="clone" number="120"/>
+ <syscall name="setdomainname" number="121"/>
+ <syscall name="uname" number="122"/>
+ <syscall name="modify_ldt" number="123"/>
+ <syscall name="adjtimex" number="124"/>
+ <syscall name="mprotect" number="125"/>
+ <syscall name="sigprocmask" number="126"/>
+ <syscall name="create_module" number="127"/>
+ <syscall name="init_module" number="128"/>
+ <syscall name="delete_module" number="129"/>
+ <syscall name="get_kernel_syms" number="130"/>
+ <syscall name="quotactl" number="131"/>
+ <syscall name="getpgid" number="132"/>
+ <syscall name="fchdir" number="133"/>
+ <syscall name="bdflush" number="134"/>
+ <syscall name="sysfs" number="135"/>
+ <syscall name="personality" number="136"/>
+ <syscall name="afs_syscall" number="137"/>
+ <syscall name="setfsuid" number="138"/>
+ <syscall name="setfsgid" number="139"/>
+ <syscall name="_llseek" number="140"/>
+ <syscall name="getdents" number="141"/>
+ <syscall name="_newselect" number="142"/>
+ <syscall name="flock" number="143"/>
+ <syscall name="msync" number="144"/>
+ <syscall name="readv" number="145"/>
+ <syscall name="writev" number="146"/>
+ <syscall name="getsid" number="147"/>
+ <syscall name="fdatasync" number="148"/>
+ <syscall name="_sysctl" number="149"/>
+ <syscall name="mlock" number="150"/>
+ <syscall name="munlock" number="151"/>
+ <syscall name="mlockall" number="152"/>
+ <syscall name="munlockall" number="153"/>
+ <syscall name="sched_setparam" number="154"/>
+ <syscall name="sched_getparam" number="155"/>
+ <syscall name="sched_setscheduler" number="156"/>
+ <syscall name="sched_getscheduler" number="157"/>
+ <syscall name="sched_yield" number="158"/>
+ <syscall name="sched_get_priority_max" number="159"/>
+ <syscall name="sched_get_priority_min" number="160"/>
+ <syscall name="sched_rr_get_interval" number="161"/>
+ <syscall name="nanosleep" number="162"/>
+ <syscall name="mremap" number="163"/>
+ <syscall name="setresuid" number="164"/>
+ <syscall name="getresuid" number="165"/>
+ <syscall name="vm86" number="166"/>
+ <syscall name="query_module" number="167"/>
+ <syscall name="poll" number="168"/>
+ <syscall name="nfsservctl" number="169"/>
+ <syscall name="setresgid" number="170"/>
+ <syscall name="getresgid" number="171"/>
+ <syscall name="prctl" number="172"/>
+ <syscall name="rt_sigreturn" number="173"/>
+ <syscall name="rt_sigaction" number="174"/>
+ <syscall name="rt_sigprocmask" number="175"/>
+ <syscall name="rt_sigpending" number="176"/>
+ <syscall name="rt_sigtimedwait" number="177"/>
+ <syscall name="rt_sigqueueinfo" number="178"/>
+ <syscall name="rt_sigsuspend" number="179"/>
+ <syscall name="pread64" number="180"/>
+ <syscall name="pwrite64" number="181"/>
+ <syscall name="chown" number="182"/>
+ <syscall name="getcwd" number="183"/>
+ <syscall name="capget" number="184"/>
+ <syscall name="capset" number="185"/>
+ <syscall name="sigaltstack" number="186"/>
+ <syscall name="sendfile" number="187"/>
+ <syscall name="getpmsg" number="188"/>
+ <syscall name="putpmsg" number="189"/>
+ <syscall name="vfork" number="190"/>
+ <syscall name="ugetrlimit" number="191"/>
+ <syscall name="mmap2" number="192"/>
+ <syscall name="truncate64" number="193"/>
+ <syscall name="ftruncate64" number="194"/>
+ <syscall name="stat64" number="195"/>
+ <syscall name="lstat64" number="196"/>
+ <syscall name="fstat64" number="197"/>
+ <syscall name="lchown32" number="198"/>
+ <syscall name="getuid32" number="199"/>
+ <syscall name="getgid32" number="200"/>
+ <syscall name="geteuid32" number="201"/>
+ <syscall name="getegid32" number="202"/>
+ <syscall name="setreuid32" number="203"/>
+ <syscall name="setregid32" number="204"/>
+ <syscall name="getgroups32" number="205"/>
+ <syscall name="setgroups32" number="206"/>
+ <syscall name="fchown32" number="207"/>
+ <syscall name="setresuid32" number="208"/>
+ <syscall name="getresuid32" number="209"/>
+ <syscall name="setresgid32" number="210"/>
+ <syscall name="getresgid32" number="211"/>
+ <syscall name="chown32" number="212"/>
+ <syscall name="setuid32" number="213"/>
+ <syscall name="setgid32" number="214"/>
+ <syscall name="setfsuid32" number="215"/>
+ <syscall name="setfsgid32" number="216"/>
+ <syscall name="pivot_root" number="217"/>
+ <syscall name="mincore" number="218"/>
+ <syscall name="madvise" number="219"/>
+ <syscall name="madvise1" number="220"/>
+ <syscall name="getdents64" number="221"/>
+ <syscall name="fcntl64" number="222"/>
+ <syscall name="gettid" number="224"/>
+ <syscall name="readahead" number="225"/>
+ <syscall name="setxattr" number="226"/>
+ <syscall name="lsetxattr" number="227"/>
+ <syscall name="fsetxattr" number="228"/>
+ <syscall name="getxattr" number="229"/>
+ <syscall name="lgetxattr" number="230"/>
+ <syscall name="fgetxattr" number="231"/>
+ <syscall name="listxattr" number="232"/>
+ <syscall name="llistxattr" number="233"/>
+ <syscall name="flistxattr" number="234"/>
+ <syscall name="removexattr" number="235"/>
+ <syscall name="lremovexattr" number="236"/>
+ <syscall name="fremovexattr" number="237"/>
+ <syscall name="tkill" number="238"/>
+ <syscall name="sendfile64" number="239"/>
+ <syscall name="futex" number="240"/>
+ <syscall name="sched_setaffinity" number="241"/>
+ <syscall name="sched_getaffinity" number="242"/>
+ <syscall name="set_thread_area" number="243"/>
+ <syscall name="get_thread_area" number="244"/>
+ <syscall name="io_setup" number="245"/>
+ <syscall name="io_destroy" number="246"/>
+ <syscall name="io_getevents" number="247"/>
+ <syscall name="io_submit" number="248"/>
+ <syscall name="io_cancel" number="249"/>
+ <syscall name="fadvise64" number="250"/>
+ <syscall name="exit_group" number="252"/>
+ <syscall name="lookup_dcookie" number="253"/>
+ <syscall name="epoll_create" number="254"/>
+ <syscall name="epoll_ctl" number="255"/>
+ <syscall name="epoll_wait" number="256"/>
+ <syscall name="remap_file_pages" number="257"/>
+ <syscall name="set_tid_address" number="258"/>
+ <syscall name="timer_create" number="259"/>
+ <syscall name="timer_settime" number="260"/>
+ <syscall name="timer_gettime" number="261"/>
+ <syscall name="timer_getoverrun" number="262"/>
+ <syscall name="timer_delete" number="263"/>
+ <syscall name="clock_settime" number="264"/>
+ <syscall name="clock_gettime" number="265"/>
+ <syscall name="clock_getres" number="266"/>
+ <syscall name="clock_nanosleep" number="267"/>
+ <syscall name="statfs64" number="268"/>
+ <syscall name="fstatfs64" number="269"/>
+ <syscall name="tgkill" number="270"/>
+ <syscall name="utimes" number="271"/>
+ <syscall name="fadvise64_64" number="272"/>
+ <syscall name="vserver" number="273"/>
+ <syscall name="mbind" number="274"/>
+ <syscall name="get_mempolicy" number="275"/>
+ <syscall name="set_mempolicy" number="276"/>
+ <syscall name="mq_open" number="277"/>
+ <syscall name="mq_unlink" number="278"/>
+ <syscall name="mq_timedsend" number="279"/>
+ <syscall name="mq_timedreceive" number="280"/>
+ <syscall name="mq_notify" number="281"/>
+ <syscall name="mq_getsetattr" number="282"/>
+ <syscall name="kexec_load" number="283"/>
+ <syscall name="waitid" number="284"/>
+ <syscall name="add_key" number="286"/>
+ <syscall name="request_key" number="287"/>
+ <syscall name="keyctl" number="288"/>
+ <syscall name="ioprio_set" number="289"/>
+ <syscall name="ioprio_get" number="290"/>
+ <syscall name="inotify_init" number="291"/>
+ <syscall name="inotify_add_watch" number="292"/>
+ <syscall name="inotify_rm_watch" number="293"/>
+ <syscall name="migrate_pages" number="294"/>
+ <syscall name="openat" number="295"/>
+ <syscall name="mkdirat" number="296"/>
+ <syscall name="mknodat" number="297"/>
+ <syscall name="fchownat" number="298"/>
+ <syscall name="futimesat" number="299"/>
+ <syscall name="fstatat64" number="300"/>
+ <syscall name="unlinkat" number="301"/>
+ <syscall name="renameat" number="302"/>
+ <syscall name="linkat" number="303"/>
+ <syscall name="symlinkat" number="304"/>
+ <syscall name="readlinkat" number="305"/>
+ <syscall name="fchmodat" number="306"/>
+ <syscall name="faccessat" number="307"/>
+ <syscall name="pselect6" number="308"/>
+ <syscall name="ppoll" number="309"/>
+ <syscall name="unshare" number="310"/>
+ <syscall name="set_robust_list" number="311"/>
+ <syscall name="get_robust_list" number="312"/>
+ <syscall name="splice" number="313"/>
+ <syscall name="sync_file_range" number="314"/>
+ <syscall name="tee" number="315"/>
+ <syscall name="vmsplice" number="316"/>
+ <syscall name="move_pages" number="317"/>
+ <syscall name="getcpu" number="318"/>
+ <syscall name="epoll_pwait" number="319"/>
+ <syscall name="utimensat" number="320"/>
+ <syscall name="signalfd" number="321"/>
+ <syscall name="timerfd_create" number="322"/>
+ <syscall name="eventfd" number="323"/>
+ <syscall name="fallocate" number="324"/>
+ <syscall name="timerfd_settime" number="325"/>
+</syscalls_info>
diff --git a/share/gdb/syscalls/mips-n32-linux.xml b/share/gdb/syscalls/mips-n32-linux.xml
new file mode 100644
index 0000000..b4e2181
--- /dev/null
+++ b/share/gdb/syscalls/mips-n32-linux.xml
@@ -0,0 +1,319 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2011-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/mips/include/asm/unistd.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="read" number="6000"/>
+ <syscall name="write" number="6001"/>
+ <syscall name="open" number="6002"/>
+ <syscall name="close" number="6003"/>
+ <syscall name="stat" number="6004"/>
+ <syscall name="fstat" number="6005"/>
+ <syscall name="lstat" number="6006"/>
+ <syscall name="poll" number="6007"/>
+ <syscall name="lseek" number="6008"/>
+ <syscall name="mmap" number="6009"/>
+ <syscall name="mprotect" number="6010"/>
+ <syscall name="munmap" number="6011"/>
+ <syscall name="brk" number="6012"/>
+ <syscall name="rt_sigaction" number="6013"/>
+ <syscall name="rt_sigprocmask" number="6014"/>
+ <syscall name="ioctl" number="6015"/>
+ <syscall name="pread64" number="6016"/>
+ <syscall name="pwrite64" number="6017"/>
+ <syscall name="readv" number="6018"/>
+ <syscall name="writev" number="6019"/>
+ <syscall name="access" number="6020"/>
+ <syscall name="pipe" number="6021"/>
+ <syscall name="_newselect" number="6022"/>
+ <syscall name="sched_yield" number="6023"/>
+ <syscall name="mremap" number="6024"/>
+ <syscall name="msync" number="6025"/>
+ <syscall name="mincore" number="6026"/>
+ <syscall name="madvise" number="6027"/>
+ <syscall name="shmget" number="6028"/>
+ <syscall name="shmat" number="6029"/>
+ <syscall name="shmctl" number="6030"/>
+ <syscall name="dup" number="6031"/>
+ <syscall name="dup2" number="6032"/>
+ <syscall name="pause" number="6033"/>
+ <syscall name="nanosleep" number="6034"/>
+ <syscall name="getitimer" number="6035"/>
+ <syscall name="setitimer" number="6036"/>
+ <syscall name="alarm" number="6037"/>
+ <syscall name="getpid" number="6038"/>
+ <syscall name="sendfile" number="6039"/>
+ <syscall name="socket" number="6040"/>
+ <syscall name="connect" number="6041"/>
+ <syscall name="accept" number="6042"/>
+ <syscall name="sendto" number="6043"/>
+ <syscall name="recvfrom" number="6044"/>
+ <syscall name="sendmsg" number="6045"/>
+ <syscall name="recvmsg" number="6046"/>
+ <syscall name="shutdown" number="6047"/>
+ <syscall name="bind" number="6048"/>
+ <syscall name="listen" number="6049"/>
+ <syscall name="getsockname" number="6050"/>
+ <syscall name="getpeername" number="6051"/>
+ <syscall name="socketpair" number="6052"/>
+ <syscall name="setsockopt" number="6053"/>
+ <syscall name="getsockopt" number="6054"/>
+ <syscall name="clone" number="6055"/>
+ <syscall name="fork" number="6056"/>
+ <syscall name="execve" number="6057"/>
+ <syscall name="exit" number="6058"/>
+ <syscall name="wait4" number="6059"/>
+ <syscall name="kill" number="6060"/>
+ <syscall name="uname" number="6061"/>
+ <syscall name="semget" number="6062"/>
+ <syscall name="semop" number="6063"/>
+ <syscall name="semctl" number="6064"/>
+ <syscall name="shmdt" number="6065"/>
+ <syscall name="msgget" number="6066"/>
+ <syscall name="msgsnd" number="6067"/>
+ <syscall name="msgrcv" number="6068"/>
+ <syscall name="msgctl" number="6069"/>
+ <syscall name="fcntl" number="6070"/>
+ <syscall name="flock" number="6071"/>
+ <syscall name="fsync" number="6072"/>
+ <syscall name="fdatasync" number="6073"/>
+ <syscall name="truncate" number="6074"/>
+ <syscall name="ftruncate" number="6075"/>
+ <syscall name="getdents" number="6076"/>
+ <syscall name="getcwd" number="6077"/>
+ <syscall name="chdir" number="6078"/>
+ <syscall name="fchdir" number="6079"/>
+ <syscall name="rename" number="6080"/>
+ <syscall name="mkdir" number="6081"/>
+ <syscall name="rmdir" number="6082"/>
+ <syscall name="creat" number="6083"/>
+ <syscall name="link" number="6084"/>
+ <syscall name="unlink" number="6085"/>
+ <syscall name="symlink" number="6086"/>
+ <syscall name="readlink" number="6087"/>
+ <syscall name="chmod" number="6088"/>
+ <syscall name="fchmod" number="6089"/>
+ <syscall name="chown" number="6090"/>
+ <syscall name="fchown" number="6091"/>
+ <syscall name="lchown" number="6092"/>
+ <syscall name="umask" number="6093"/>
+ <syscall name="gettimeofday" number="6094"/>
+ <syscall name="getrlimit" number="6095"/>
+ <syscall name="getrusage" number="6096"/>
+ <syscall name="sysinfo" number="6097"/>
+ <syscall name="times" number="6098"/>
+ <syscall name="ptrace" number="6099"/>
+ <syscall name="getuid" number="6100"/>
+ <syscall name="syslog" number="6101"/>
+ <syscall name="getgid" number="6102"/>
+ <syscall name="setuid" number="6103"/>
+ <syscall name="setgid" number="6104"/>
+ <syscall name="geteuid" number="6105"/>
+ <syscall name="getegid" number="6106"/>
+ <syscall name="setpgid" number="6107"/>
+ <syscall name="getppid" number="6108"/>
+ <syscall name="getpgrp" number="6109"/>
+ <syscall name="setsid" number="6110"/>
+ <syscall name="setreuid" number="6111"/>
+ <syscall name="setregid" number="6112"/>
+ <syscall name="getgroups" number="6113"/>
+ <syscall name="setgroups" number="6114"/>
+ <syscall name="setresuid" number="6115"/>
+ <syscall name="getresuid" number="6116"/>
+ <syscall name="setresgid" number="6117"/>
+ <syscall name="getresgid" number="6118"/>
+ <syscall name="getpgid" number="6119"/>
+ <syscall name="setfsuid" number="6120"/>
+ <syscall name="setfsgid" number="6121"/>
+ <syscall name="getsid" number="6122"/>
+ <syscall name="capget" number="6123"/>
+ <syscall name="capset" number="6124"/>
+ <syscall name="rt_sigpending" number="6125"/>
+ <syscall name="rt_sigtimedwait" number="6126"/>
+ <syscall name="rt_sigqueueinfo" number="6127"/>
+ <syscall name="rt_sigsuspend" number="6128"/>
+ <syscall name="sigaltstack" number="6129"/>
+ <syscall name="utime" number="6130"/>
+ <syscall name="mknod" number="6131"/>
+ <syscall name="personality" number="6132"/>
+ <syscall name="ustat" number="6133"/>
+ <syscall name="statfs" number="6134"/>
+ <syscall name="fstatfs" number="6135"/>
+ <syscall name="sysfs" number="6136"/>
+ <syscall name="getpriority" number="6137"/>
+ <syscall name="setpriority" number="6138"/>
+ <syscall name="sched_setparam" number="6139"/>
+ <syscall name="sched_getparam" number="6140"/>
+ <syscall name="sched_setscheduler" number="6141"/>
+ <syscall name="sched_getscheduler" number="6142"/>
+ <syscall name="sched_get_priority_max" number="6143"/>
+ <syscall name="sched_get_priority_min" number="6144"/>
+ <syscall name="sched_rr_get_interval" number="6145"/>
+ <syscall name="mlock" number="6146"/>
+ <syscall name="munlock" number="6147"/>
+ <syscall name="mlockall" number="6148"/>
+ <syscall name="munlockall" number="6149"/>
+ <syscall name="vhangup" number="6150"/>
+ <syscall name="pivot_root" number="6151"/>
+ <syscall name="_sysctl" number="6152"/>
+ <syscall name="prctl" number="6153"/>
+ <syscall name="adjtimex" number="6154"/>
+ <syscall name="setrlimit" number="6155"/>
+ <syscall name="chroot" number="6156"/>
+ <syscall name="sync" number="6157"/>
+ <syscall name="acct" number="6158"/>
+ <syscall name="settimeofday" number="6159"/>
+ <syscall name="mount" number="6160"/>
+ <syscall name="umount2" number="6161"/>
+ <syscall name="swapon" number="6162"/>
+ <syscall name="swapoff" number="6163"/>
+ <syscall name="reboot" number="6164"/>
+ <syscall name="sethostname" number="6165"/>
+ <syscall name="setdomainname" number="6166"/>
+ <syscall name="create_module" number="6167"/>
+ <syscall name="init_module" number="6168"/>
+ <syscall name="delete_module" number="6169"/>
+ <syscall name="get_kernel_syms" number="6170"/>
+ <syscall name="query_module" number="6171"/>
+ <syscall name="quotactl" number="6172"/>
+ <syscall name="nfsservctl" number="6173"/>
+ <syscall name="getpmsg" number="6174"/>
+ <syscall name="putpmsg" number="6175"/>
+ <syscall name="afs_syscall" number="6176"/>
+ <syscall name="reserved177" number="6177"/>
+ <syscall name="gettid" number="6178"/>
+ <syscall name="readahead" number="6179"/>
+ <syscall name="setxattr" number="6180"/>
+ <syscall name="lsetxattr" number="6181"/>
+ <syscall name="fsetxattr" number="6182"/>
+ <syscall name="getxattr" number="6183"/>
+ <syscall name="lgetxattr" number="6184"/>
+ <syscall name="fgetxattr" number="6185"/>
+ <syscall name="listxattr" number="6186"/>
+ <syscall name="llistxattr" number="6187"/>
+ <syscall name="flistxattr" number="6188"/>
+ <syscall name="removexattr" number="6189"/>
+ <syscall name="lremovexattr" number="6190"/>
+ <syscall name="fremovexattr" number="6191"/>
+ <syscall name="tkill" number="6192"/>
+ <syscall name="reserved193" number="6193"/>
+ <syscall name="futex" number="6194"/>
+ <syscall name="sched_setaffinity" number="6195"/>
+ <syscall name="sched_getaffinity" number="6196"/>
+ <syscall name="cacheflush" number="6197"/>
+ <syscall name="cachectl" number="6198"/>
+ <syscall name="sysmips" number="6199"/>
+ <syscall name="io_setup" number="6200"/>
+ <syscall name="io_destroy" number="6201"/>
+ <syscall name="io_getevents" number="6202"/>
+ <syscall name="io_submit" number="6203"/>
+ <syscall name="io_cancel" number="6204"/>
+ <syscall name="exit_group" number="6205"/>
+ <syscall name="lookup_dcookie" number="6206"/>
+ <syscall name="epoll_create" number="6207"/>
+ <syscall name="epoll_ctl" number="6208"/>
+ <syscall name="epoll_wait" number="6209"/>
+ <syscall name="remap_file_pages" number="6210"/>
+ <syscall name="rt_sigreturn" number="6211"/>
+ <syscall name="fcntl64" number="6212"/>
+ <syscall name="set_tid_address" number="6213"/>
+ <syscall name="restart_syscall" number="6214"/>
+ <syscall name="semtimedop" number="6215"/>
+ <syscall name="fadvise64" number="6216"/>
+ <syscall name="statfs64" number="6217"/>
+ <syscall name="fstatfs64" number="6218"/>
+ <syscall name="sendfile64" number="6219"/>
+ <syscall name="timer_create" number="6220"/>
+ <syscall name="timer_settime" number="6221"/>
+ <syscall name="timer_gettime" number="6222"/>
+ <syscall name="timer_getoverrun" number="6223"/>
+ <syscall name="timer_delete" number="6224"/>
+ <syscall name="clock_settime" number="6225"/>
+ <syscall name="clock_gettime" number="6226"/>
+ <syscall name="clock_getres" number="6227"/>
+ <syscall name="clock_nanosleep" number="6228"/>
+ <syscall name="tgkill" number="6229"/>
+ <syscall name="utimes" number="6230"/>
+ <syscall name="mbind" number="6231"/>
+ <syscall name="get_mempolicy" number="6232"/>
+ <syscall name="set_mempolicy" number="6233"/>
+ <syscall name="mq_open" number="6234"/>
+ <syscall name="mq_unlink" number="6235"/>
+ <syscall name="mq_timedsend" number="6236"/>
+ <syscall name="mq_timedreceive" number="6237"/>
+ <syscall name="mq_notify" number="6238"/>
+ <syscall name="mq_getsetattr" number="6239"/>
+ <syscall name="vserver" number="6240"/>
+ <syscall name="waitid" number="6241"/>
+ <syscall name="add_key" number="6243"/>
+ <syscall name="request_key" number="6244"/>
+ <syscall name="keyctl" number="6245"/>
+ <syscall name="set_thread_area" number="6246"/>
+ <syscall name="inotify_init" number="6247"/>
+ <syscall name="inotify_add_watch" number="6248"/>
+ <syscall name="inotify_rm_watch" number="6249"/>
+ <syscall name="migrate_pages" number="6250"/>
+ <syscall name="openat" number="6251"/>
+ <syscall name="mkdirat" number="6252"/>
+ <syscall name="mknodat" number="6253"/>
+ <syscall name="fchownat" number="6254"/>
+ <syscall name="futimesat" number="6255"/>
+ <syscall name="newfstatat" number="6256"/>
+ <syscall name="unlinkat" number="6257"/>
+ <syscall name="renameat" number="6258"/>
+ <syscall name="linkat" number="6259"/>
+ <syscall name="symlinkat" number="6260"/>
+ <syscall name="readlinkat" number="6261"/>
+ <syscall name="fchmodat" number="6262"/>
+ <syscall name="faccessat" number="6263"/>
+ <syscall name="pselect6" number="6264"/>
+ <syscall name="ppoll" number="6265"/>
+ <syscall name="unshare" number="6266"/>
+ <syscall name="splice" number="6267"/>
+ <syscall name="sync_file_range" number="6268"/>
+ <syscall name="tee" number="6269"/>
+ <syscall name="vmsplice" number="6270"/>
+ <syscall name="move_pages" number="6271"/>
+ <syscall name="set_robust_list" number="6272"/>
+ <syscall name="get_robust_list" number="6273"/>
+ <syscall name="kexec_load" number="6274"/>
+ <syscall name="getcpu" number="6275"/>
+ <syscall name="epoll_pwait" number="6276"/>
+ <syscall name="ioprio_set" number="6277"/>
+ <syscall name="ioprio_get" number="6278"/>
+ <syscall name="utimensat" number="6279"/>
+ <syscall name="signalfd" number="6280"/>
+ <syscall name="timerfd" number="6281"/>
+ <syscall name="eventfd" number="6282"/>
+ <syscall name="fallocate" number="6283"/>
+ <syscall name="timerfd_create" number="6284"/>
+ <syscall name="timerfd_gettime" number="6285"/>
+ <syscall name="timerfd_settime" number="6286"/>
+ <syscall name="signalfd4" number="6287"/>
+ <syscall name="eventfd2" number="6288"/>
+ <syscall name="epoll_create1" number="6289"/>
+ <syscall name="dup3" number="6290"/>
+ <syscall name="pipe2" number="6291"/>
+ <syscall name="inotify_init1" number="6292"/>
+ <syscall name="preadv" number="6293"/>
+ <syscall name="pwritev" number="6294"/>
+ <syscall name="rt_tgsigqueueinfo" number="6295"/>
+ <syscall name="perf_event_open" number="6296"/>
+ <syscall name="accept4" number="6297"/>
+ <syscall name="recvmmsg" number="6298"/>
+ <syscall name="getdents64" number="6299"/>
+ <syscall name="fanotify_init" number="6300"/>
+ <syscall name="fanotify_mark" number="6301"/>
+ <syscall name="prlimit64" number="6302"/>
+</syscalls_info>
diff --git a/share/gdb/syscalls/mips-n64-linux.xml b/share/gdb/syscalls/mips-n64-linux.xml
new file mode 100644
index 0000000..896e0c0
--- /dev/null
+++ b/share/gdb/syscalls/mips-n64-linux.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2011-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/mips/include/asm/unistd.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="read" number="5000"/>
+ <syscall name="write" number="5001"/>
+ <syscall name="open" number="5002"/>
+ <syscall name="close" number="5003"/>
+ <syscall name="stat" number="5004"/>
+ <syscall name="fstat" number="5005"/>
+ <syscall name="lstat" number="5006"/>
+ <syscall name="poll" number="5007"/>
+ <syscall name="lseek" number="5008"/>
+ <syscall name="mmap" number="5009"/>
+ <syscall name="mprotect" number="5010"/>
+ <syscall name="munmap" number="5011"/>
+ <syscall name="brk" number="5012"/>
+ <syscall name="rt_sigaction" number="5013"/>
+ <syscall name="rt_sigprocmask" number="5014"/>
+ <syscall name="ioctl" number="5015"/>
+ <syscall name="pread64" number="5016"/>
+ <syscall name="pwrite64" number="5017"/>
+ <syscall name="readv" number="5018"/>
+ <syscall name="writev" number="5019"/>
+ <syscall name="access" number="5020"/>
+ <syscall name="pipe" number="5021"/>
+ <syscall name="_newselect" number="5022"/>
+ <syscall name="sched_yield" number="5023"/>
+ <syscall name="mremap" number="5024"/>
+ <syscall name="msync" number="5025"/>
+ <syscall name="mincore" number="5026"/>
+ <syscall name="madvise" number="5027"/>
+ <syscall name="shmget" number="5028"/>
+ <syscall name="shmat" number="5029"/>
+ <syscall name="shmctl" number="5030"/>
+ <syscall name="dup" number="5031"/>
+ <syscall name="dup2" number="5032"/>
+ <syscall name="pause" number="5033"/>
+ <syscall name="nanosleep" number="5034"/>
+ <syscall name="getitimer" number="5035"/>
+ <syscall name="setitimer" number="5036"/>
+ <syscall name="alarm" number="5037"/>
+ <syscall name="getpid" number="5038"/>
+ <syscall name="sendfile" number="5039"/>
+ <syscall name="socket" number="5040"/>
+ <syscall name="connect" number="5041"/>
+ <syscall name="accept" number="5042"/>
+ <syscall name="sendto" number="5043"/>
+ <syscall name="recvfrom" number="5044"/>
+ <syscall name="sendmsg" number="5045"/>
+ <syscall name="recvmsg" number="5046"/>
+ <syscall name="shutdown" number="5047"/>
+ <syscall name="bind" number="5048"/>
+ <syscall name="listen" number="5049"/>
+ <syscall name="getsockname" number="5050"/>
+ <syscall name="getpeername" number="5051"/>
+ <syscall name="socketpair" number="5052"/>
+ <syscall name="setsockopt" number="5053"/>
+ <syscall name="getsockopt" number="5054"/>
+ <syscall name="clone" number="5055"/>
+ <syscall name="fork" number="5056"/>
+ <syscall name="execve" number="5057"/>
+ <syscall name="exit" number="5058"/>
+ <syscall name="wait4" number="5059"/>
+ <syscall name="kill" number="5060"/>
+ <syscall name="uname" number="5061"/>
+ <syscall name="semget" number="5062"/>
+ <syscall name="semop" number="5063"/>
+ <syscall name="semctl" number="5064"/>
+ <syscall name="shmdt" number="5065"/>
+ <syscall name="msgget" number="5066"/>
+ <syscall name="msgsnd" number="5067"/>
+ <syscall name="msgrcv" number="5068"/>
+ <syscall name="msgctl" number="5069"/>
+ <syscall name="fcntl" number="5070"/>
+ <syscall name="flock" number="5071"/>
+ <syscall name="fsync" number="5072"/>
+ <syscall name="fdatasync" number="5073"/>
+ <syscall name="truncate" number="5074"/>
+ <syscall name="ftruncate" number="5075"/>
+ <syscall name="getdents" number="5076"/>
+ <syscall name="getcwd" number="5077"/>
+ <syscall name="chdir" number="5078"/>
+ <syscall name="fchdir" number="5079"/>
+ <syscall name="rename" number="5080"/>
+ <syscall name="mkdir" number="5081"/>
+ <syscall name="rmdir" number="5082"/>
+ <syscall name="creat" number="5083"/>
+ <syscall name="link" number="5084"/>
+ <syscall name="unlink" number="5085"/>
+ <syscall name="symlink" number="5086"/>
+ <syscall name="readlink" number="5087"/>
+ <syscall name="chmod" number="5088"/>
+ <syscall name="fchmod" number="5089"/>
+ <syscall name="chown" number="5090"/>
+ <syscall name="fchown" number="5091"/>
+ <syscall name="lchown" number="5092"/>
+ <syscall name="umask" number="5093"/>
+ <syscall name="gettimeofday" number="5094"/>
+ <syscall name="getrlimit" number="5095"/>
+ <syscall name="getrusage" number="5096"/>
+ <syscall name="sysinfo" number="5097"/>
+ <syscall name="times" number="5098"/>
+ <syscall name="ptrace" number="5099"/>
+ <syscall name="getuid" number="5100"/>
+ <syscall name="syslog" number="5101"/>
+ <syscall name="getgid" number="5102"/>
+ <syscall name="setuid" number="5103"/>
+ <syscall name="setgid" number="5104"/>
+ <syscall name="geteuid" number="5105"/>
+ <syscall name="getegid" number="5106"/>
+ <syscall name="setpgid" number="5107"/>
+ <syscall name="getppid" number="5108"/>
+ <syscall name="getpgrp" number="5109"/>
+ <syscall name="setsid" number="5110"/>
+ <syscall name="setreuid" number="5111"/>
+ <syscall name="setregid" number="5112"/>
+ <syscall name="getgroups" number="5113"/>
+ <syscall name="setgroups" number="5114"/>
+ <syscall name="setresuid" number="5115"/>
+ <syscall name="getresuid" number="5116"/>
+ <syscall name="setresgid" number="5117"/>
+ <syscall name="getresgid" number="5118"/>
+ <syscall name="getpgid" number="5119"/>
+ <syscall name="setfsuid" number="5120"/>
+ <syscall name="setfsgid" number="5121"/>
+ <syscall name="getsid" number="5122"/>
+ <syscall name="capget" number="5123"/>
+ <syscall name="capset" number="5124"/>
+ <syscall name="rt_sigpending" number="5125"/>
+ <syscall name="rt_sigtimedwait" number="5126"/>
+ <syscall name="rt_sigqueueinfo" number="5127"/>
+ <syscall name="rt_sigsuspend" number="5128"/>
+ <syscall name="sigaltstack" number="5129"/>
+ <syscall name="utime" number="5130"/>
+ <syscall name="mknod" number="5131"/>
+ <syscall name="personality" number="5132"/>
+ <syscall name="ustat" number="5133"/>
+ <syscall name="statfs" number="5134"/>
+ <syscall name="fstatfs" number="5135"/>
+ <syscall name="sysfs" number="5136"/>
+ <syscall name="getpriority" number="5137"/>
+ <syscall name="setpriority" number="5138"/>
+ <syscall name="sched_setparam" number="5139"/>
+ <syscall name="sched_getparam" number="5140"/>
+ <syscall name="sched_setscheduler" number="5141"/>
+ <syscall name="sched_getscheduler" number="5142"/>
+ <syscall name="sched_get_priority_max" number="5143"/>
+ <syscall name="sched_get_priority_min" number="5144"/>
+ <syscall name="sched_rr_get_interval" number="5145"/>
+ <syscall name="mlock" number="5146"/>
+ <syscall name="munlock" number="5147"/>
+ <syscall name="mlockall" number="5148"/>
+ <syscall name="munlockall" number="5149"/>
+ <syscall name="vhangup" number="5150"/>
+ <syscall name="pivot_root" number="5151"/>
+ <syscall name="_sysctl" number="5152"/>
+ <syscall name="prctl" number="5153"/>
+ <syscall name="adjtimex" number="5154"/>
+ <syscall name="setrlimit" number="5155"/>
+ <syscall name="chroot" number="5156"/>
+ <syscall name="sync" number="5157"/>
+ <syscall name="acct" number="5158"/>
+ <syscall name="settimeofday" number="5159"/>
+ <syscall name="mount" number="5160"/>
+ <syscall name="umount2" number="5161"/>
+ <syscall name="swapon" number="5162"/>
+ <syscall name="swapoff" number="5163"/>
+ <syscall name="reboot" number="5164"/>
+ <syscall name="sethostname" number="5165"/>
+ <syscall name="setdomainname" number="5166"/>
+ <syscall name="create_module" number="5167"/>
+ <syscall name="init_module" number="5168"/>
+ <syscall name="delete_module" number="5169"/>
+ <syscall name="get_kernel_syms" number="5170"/>
+ <syscall name="query_module" number="5171"/>
+ <syscall name="quotactl" number="5172"/>
+ <syscall name="nfsservctl" number="5173"/>
+ <syscall name="getpmsg" number="5174"/>
+ <syscall name="putpmsg" number="5175"/>
+ <syscall name="afs_syscall" number="5176"/>
+ <syscall name="gettid" number="5178"/>
+ <syscall name="readahead" number="5179"/>
+ <syscall name="setxattr" number="5180"/>
+ <syscall name="lsetxattr" number="5181"/>
+ <syscall name="fsetxattr" number="5182"/>
+ <syscall name="getxattr" number="5183"/>
+ <syscall name="lgetxattr" number="5184"/>
+ <syscall name="fgetxattr" number="5185"/>
+ <syscall name="listxattr" number="5186"/>
+ <syscall name="llistxattr" number="5187"/>
+ <syscall name="flistxattr" number="5188"/>
+ <syscall name="removexattr" number="5189"/>
+ <syscall name="lremovexattr" number="5190"/>
+ <syscall name="fremovexattr" number="5191"/>
+ <syscall name="tkill" number="5192"/>
+ <syscall name="futex" number="5194"/>
+ <syscall name="sched_setaffinity" number="5195"/>
+ <syscall name="sched_getaffinity" number="5196"/>
+ <syscall name="cacheflush" number="5197"/>
+ <syscall name="cachectl" number="5198"/>
+ <syscall name="sysmips" number="5199"/>
+ <syscall name="io_setup" number="5200"/>
+ <syscall name="io_destroy" number="5201"/>
+ <syscall name="io_getevents" number="5202"/>
+ <syscall name="io_submit" number="5203"/>
+ <syscall name="io_cancel" number="5204"/>
+ <syscall name="exit_group" number="5205"/>
+ <syscall name="lookup_dcookie" number="5206"/>
+ <syscall name="epoll_create" number="5207"/>
+ <syscall name="epoll_ctl" number="5208"/>
+ <syscall name="epoll_wait" number="5209"/>
+ <syscall name="remap_file_pages" number="5210"/>
+ <syscall name="rt_sigreturn" number="5211"/>
+ <syscall name="set_tid_address" number="5212"/>
+ <syscall name="restart_syscall" number="5213"/>
+ <syscall name="semtimedop" number="5214"/>
+ <syscall name="fadvise64" number="5215"/>
+ <syscall name="timer_create" number="5216"/>
+ <syscall name="timer_settime" number="5217"/>
+ <syscall name="timer_gettime" number="5218"/>
+ <syscall name="timer_getoverrun" number="5219"/>
+ <syscall name="timer_delete" number="5220"/>
+ <syscall name="clock_settime" number="5221"/>
+ <syscall name="clock_gettime" number="5222"/>
+ <syscall name="clock_getres" number="5223"/>
+ <syscall name="clock_nanosleep" number="5224"/>
+ <syscall name="tgkill" number="5225"/>
+ <syscall name="utimes" number="5226"/>
+ <syscall name="mbind" number="5227"/>
+ <syscall name="get_mempolicy" number="5228"/>
+ <syscall name="set_mempolicy" number="5229"/>
+ <syscall name="mq_open" number="5230"/>
+ <syscall name="mq_unlink" number="5231"/>
+ <syscall name="mq_timedsend" number="5232"/>
+ <syscall name="mq_timedreceive" number="5233"/>
+ <syscall name="mq_notify" number="5234"/>
+ <syscall name="mq_getsetattr" number="5235"/>
+ <syscall name="vserver" number="5236"/>
+ <syscall name="waitid" number="5237"/>
+ <syscall name="add_key" number="5239"/>
+ <syscall name="request_key" number="5240"/>
+ <syscall name="keyctl" number="5241"/>
+ <syscall name="set_thread_area" number="5242"/>
+ <syscall name="inotify_init" number="5243"/>
+ <syscall name="inotify_add_watch" number="5244"/>
+ <syscall name="inotify_rm_watch" number="5245"/>
+ <syscall name="migrate_pages" number="5246"/>
+ <syscall name="openat" number="5247"/>
+ <syscall name="mkdirat" number="5248"/>
+ <syscall name="mknodat" number="5249"/>
+ <syscall name="fchownat" number="5250"/>
+ <syscall name="futimesat" number="5251"/>
+ <syscall name="newfstatat" number="5252"/>
+ <syscall name="unlinkat" number="5253"/>
+ <syscall name="renameat" number="5254"/>
+ <syscall name="linkat" number="5255"/>
+ <syscall name="symlinkat" number="5256"/>
+ <syscall name="readlinkat" number="5257"/>
+ <syscall name="fchmodat" number="5258"/>
+ <syscall name="faccessat" number="5259"/>
+ <syscall name="pselect6" number="5260"/>
+ <syscall name="ppoll" number="5261"/>
+ <syscall name="unshare" number="5262"/>
+ <syscall name="splice" number="5263"/>
+ <syscall name="sync_file_range" number="5264"/>
+ <syscall name="tee" number="5265"/>
+ <syscall name="vmsplice" number="5266"/>
+ <syscall name="move_pages" number="5267"/>
+ <syscall name="set_robust_list" number="5268"/>
+ <syscall name="get_robust_list" number="5269"/>
+ <syscall name="kexec_load" number="5270"/>
+ <syscall name="getcpu" number="5271"/>
+ <syscall name="epoll_pwait" number="5272"/>
+ <syscall name="ioprio_set" number="5273"/>
+ <syscall name="ioprio_get" number="5274"/>
+ <syscall name="utimensat" number="5275"/>
+ <syscall name="signalfd" number="5276"/>
+ <syscall name="timerfd" number="5277"/>
+ <syscall name="eventfd" number="5278"/>
+ <syscall name="fallocate" number="5279"/>
+ <syscall name="timerfd_create" number="5280"/>
+ <syscall name="timerfd_gettime" number="5281"/>
+ <syscall name="timerfd_settime" number="5282"/>
+ <syscall name="signalfd4" number="5283"/>
+ <syscall name="eventfd2" number="5284"/>
+ <syscall name="epoll_create1" number="5285"/>
+ <syscall name="dup3" number="5286"/>
+ <syscall name="pipe2" number="5287"/>
+ <syscall name="inotify_init1" number="5288"/>
+ <syscall name="preadv" number="5289"/>
+ <syscall name="pwritev" number="5290"/>
+ <syscall name="rt_tgsigqueueinfo" number="5291"/>
+ <syscall name="perf_event_open" number="5292"/>
+ <syscall name="accept4" number="5293"/>
+ <syscall name="recvmmsg" number="5294"/>
+ <syscall name="fanotify_init" number="5295"/>
+ <syscall name="fanotify_mark" number="5296"/>
+ <syscall name="prlimit64" number="5297"/>
+</syscalls_info>
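(Editor's aside, not part of the patch: these per-ABI tables are what let GDB resolve a syscall name to the right number for the target, so e.g. "catch syscall openat" maps to 6251 under the MIPS n32 table above but 5247 under the n64 table just closed. As a minimal sketch only — the file path is a placeholder, and this script is not shipped with the toolchain or with GDB — the mapping can be read back into a dictionary like so:)

    # Sketch: load one of the GDB syscall XML tables into a name -> number dict.
    # The path below is a placeholder; point it at any file added in this commit.
    import xml.etree.ElementTree as ET

    def load_syscall_table(path):
        root = ET.parse(path).getroot()          # root element is <syscalls_info>
        return {e.get("name"): int(e.get("number"))
                for e in root.iter("syscall")}

    table = load_syscall_table("share/gdb/syscalls/mips-n64-linux.xml")
    print(table["openat"])                       # 5247 for the n64 ABI shown above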
diff --git a/share/gdb/syscalls/mips-o32-linux.xml b/share/gdb/syscalls/mips-o32-linux.xml
new file mode 100644
index 0000000..2b11247
--- /dev/null
+++ b/share/gdb/syscalls/mips-o32-linux.xml
@@ -0,0 +1,347 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2011-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/mips/include/asm/unistd.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="syscall" number="4000"/>
+ <syscall name="exit" number="4001"/>
+ <syscall name="fork" number="4002"/>
+ <syscall name="read" number="4003"/>
+ <syscall name="write" number="4004"/>
+ <syscall name="open" number="4005"/>
+ <syscall name="close" number="4006"/>
+ <syscall name="waitpid" number="4007"/>
+ <syscall name="creat" number="4008"/>
+ <syscall name="link" number="4009"/>
+ <syscall name="unlink" number="4010"/>
+ <syscall name="execve" number="4011"/>
+ <syscall name="chdir" number="4012"/>
+ <syscall name="time" number="4013"/>
+ <syscall name="mknod" number="4014"/>
+ <syscall name="chmod" number="4015"/>
+ <syscall name="lchown" number="4016"/>
+ <syscall name="break" number="4017"/>
+ <syscall name="lseek" number="4019"/>
+ <syscall name="getpid" number="4020"/>
+ <syscall name="mount" number="4021"/>
+ <syscall name="umount" number="4022"/>
+ <syscall name="setuid" number="4023"/>
+ <syscall name="getuid" number="4024"/>
+ <syscall name="stime" number="4025"/>
+ <syscall name="ptrace" number="4026"/>
+ <syscall name="alarm" number="4027"/>
+ <syscall name="pause" number="4029"/>
+ <syscall name="utime" number="4030"/>
+ <syscall name="stty" number="4031"/>
+ <syscall name="gtty" number="4032"/>
+ <syscall name="access" number="4033"/>
+ <syscall name="nice" number="4034"/>
+ <syscall name="ftime" number="4035"/>
+ <syscall name="sync" number="4036"/>
+ <syscall name="kill" number="4037"/>
+ <syscall name="rename" number="4038"/>
+ <syscall name="mkdir" number="4039"/>
+ <syscall name="rmdir" number="4040"/>
+ <syscall name="dup" number="4041"/>
+ <syscall name="pipe" number="4042"/>
+ <syscall name="times" number="4043"/>
+ <syscall name="prof" number="4044"/>
+ <syscall name="brk" number="4045"/>
+ <syscall name="setgid" number="4046"/>
+ <syscall name="getgid" number="4047"/>
+ <syscall name="signal" number="4048"/>
+ <syscall name="geteuid" number="4049"/>
+ <syscall name="getegid" number="4050"/>
+ <syscall name="acct" number="4051"/>
+ <syscall name="umount2" number="4052"/>
+ <syscall name="lock" number="4053"/>
+ <syscall name="ioctl" number="4054"/>
+ <syscall name="fcntl" number="4055"/>
+ <syscall name="mpx" number="4056"/>
+ <syscall name="setpgid" number="4057"/>
+ <syscall name="ulimit" number="4058"/>
+ <syscall name="umask" number="4060"/>
+ <syscall name="chroot" number="4061"/>
+ <syscall name="ustat" number="4062"/>
+ <syscall name="dup2" number="4063"/>
+ <syscall name="getppid" number="4064"/>
+ <syscall name="getpgrp" number="4065"/>
+ <syscall name="setsid" number="4066"/>
+ <syscall name="sigaction" number="4067"/>
+ <syscall name="sgetmask" number="4068"/>
+ <syscall name="ssetmask" number="4069"/>
+ <syscall name="setreuid" number="4070"/>
+ <syscall name="setregid" number="4071"/>
+ <syscall name="sigsuspend" number="4072"/>
+ <syscall name="sigpending" number="4073"/>
+ <syscall name="sethostname" number="4074"/>
+ <syscall name="setrlimit" number="4075"/>
+ <syscall name="getrlimit" number="4076"/>
+ <syscall name="getrusage" number="4077"/>
+ <syscall name="gettimeofday" number="4078"/>
+ <syscall name="settimeofday" number="4079"/>
+ <syscall name="getgroups" number="4080"/>
+ <syscall name="setgroups" number="4081"/>
+ <syscall name="symlink" number="4083"/>
+ <syscall name="readlink" number="4085"/>
+ <syscall name="uselib" number="4086"/>
+ <syscall name="swapon" number="4087"/>
+ <syscall name="reboot" number="4088"/>
+ <syscall name="readdir" number="4089"/>
+ <syscall name="mmap" number="4090"/>
+ <syscall name="munmap" number="4091"/>
+ <syscall name="truncate" number="4092"/>
+ <syscall name="ftruncate" number="4093"/>
+ <syscall name="fchmod" number="4094"/>
+ <syscall name="fchown" number="4095"/>
+ <syscall name="getpriority" number="4096"/>
+ <syscall name="setpriority" number="4097"/>
+ <syscall name="profil" number="4098"/>
+ <syscall name="statfs" number="4099"/>
+ <syscall name="fstatfs" number="4100"/>
+ <syscall name="ioperm" number="4101"/>
+ <syscall name="socketcall" number="4102"/>
+ <syscall name="syslog" number="4103"/>
+ <syscall name="setitimer" number="4104"/>
+ <syscall name="getitimer" number="4105"/>
+ <syscall name="stat" number="4106"/>
+ <syscall name="lstat" number="4107"/>
+ <syscall name="fstat" number="4108"/>
+ <syscall name="iopl" number="4110"/>
+ <syscall name="vhangup" number="4111"/>
+ <syscall name="idle" number="4112"/>
+ <syscall name="vm86" number="4113"/>
+ <syscall name="wait4" number="4114"/>
+ <syscall name="swapoff" number="4115"/>
+ <syscall name="sysinfo" number="4116"/>
+ <syscall name="ipc" number="4117"/>
+ <syscall name="fsync" number="4118"/>
+ <syscall name="sigreturn" number="4119"/>
+ <syscall name="clone" number="4120"/>
+ <syscall name="setdomainname" number="4121"/>
+ <syscall name="uname" number="4122"/>
+ <syscall name="modify_ldt" number="4123"/>
+ <syscall name="adjtimex" number="4124"/>
+ <syscall name="mprotect" number="4125"/>
+ <syscall name="sigprocmask" number="4126"/>
+ <syscall name="create_module" number="4127"/>
+ <syscall name="init_module" number="4128"/>
+ <syscall name="delete_module" number="4129"/>
+ <syscall name="get_kernel_syms" number="4130"/>
+ <syscall name="quotactl" number="4131"/>
+ <syscall name="getpgid" number="4132"/>
+ <syscall name="fchdir" number="4133"/>
+ <syscall name="bdflush" number="4134"/>
+ <syscall name="sysfs" number="4135"/>
+ <syscall name="personality" number="4136"/>
+ <syscall name="afs_syscall" number="4137"/>
+ <syscall name="setfsuid" number="4138"/>
+ <syscall name="setfsgid" number="4139"/>
+ <syscall name="_llseek" number="4140"/>
+ <syscall name="getdents" number="4141"/>
+ <syscall name="_newselect" number="4142"/>
+ <syscall name="flock" number="4143"/>
+ <syscall name="msync" number="4144"/>
+ <syscall name="readv" number="4145"/>
+ <syscall name="writev" number="4146"/>
+ <syscall name="cacheflush" number="4147"/>
+ <syscall name="cachectl" number="4148"/>
+ <syscall name="sysmips" number="4149"/>
+ <syscall name="getsid" number="4151"/>
+ <syscall name="fdatasync" number="4152"/>
+ <syscall name="_sysctl" number="4153"/>
+ <syscall name="mlock" number="4154"/>
+ <syscall name="munlock" number="4155"/>
+ <syscall name="mlockall" number="4156"/>
+ <syscall name="munlockall" number="4157"/>
+ <syscall name="sched_setparam" number="4158"/>
+ <syscall name="sched_getparam" number="4159"/>
+ <syscall name="sched_setscheduler" number="4160"/>
+ <syscall name="sched_getscheduler" number="4161"/>
+ <syscall name="sched_yield" number="4162"/>
+ <syscall name="sched_get_priority_max" number="4163"/>
+ <syscall name="sched_get_priority_min" number="4164"/>
+ <syscall name="sched_rr_get_interval" number="4165"/>
+ <syscall name="nanosleep" number="4166"/>
+ <syscall name="mremap" number="4167"/>
+ <syscall name="accept" number="4168"/>
+ <syscall name="bind" number="4169"/>
+ <syscall name="connect" number="4170"/>
+ <syscall name="getpeername" number="4171"/>
+ <syscall name="getsockname" number="4172"/>
+ <syscall name="getsockopt" number="4173"/>
+ <syscall name="listen" number="4174"/>
+ <syscall name="recv" number="4175"/>
+ <syscall name="recvfrom" number="4176"/>
+ <syscall name="recvmsg" number="4177"/>
+ <syscall name="send" number="4178"/>
+ <syscall name="sendmsg" number="4179"/>
+ <syscall name="sendto" number="4180"/>
+ <syscall name="setsockopt" number="4181"/>
+ <syscall name="shutdown" number="4182"/>
+ <syscall name="socket" number="4183"/>
+ <syscall name="socketpair" number="4184"/>
+ <syscall name="setresuid" number="4185"/>
+ <syscall name="getresuid" number="4186"/>
+ <syscall name="query_module" number="4187"/>
+ <syscall name="poll" number="4188"/>
+ <syscall name="nfsservctl" number="4189"/>
+ <syscall name="setresgid" number="4190"/>
+ <syscall name="getresgid" number="4191"/>
+ <syscall name="prctl" number="4192"/>
+ <syscall name="rt_sigreturn" number="4193"/>
+ <syscall name="rt_sigaction" number="4194"/>
+ <syscall name="rt_sigprocmask" number="4195"/>
+ <syscall name="rt_sigpending" number="4196"/>
+ <syscall name="rt_sigtimedwait" number="4197"/>
+ <syscall name="rt_sigqueueinfo" number="4198"/>
+ <syscall name="rt_sigsuspend" number="4199"/>
+ <syscall name="pread64" number="4200"/>
+ <syscall name="pwrite64" number="4201"/>
+ <syscall name="chown" number="4202"/>
+ <syscall name="getcwd" number="4203"/>
+ <syscall name="capget" number="4204"/>
+ <syscall name="capset" number="4205"/>
+ <syscall name="sigaltstack" number="4206"/>
+ <syscall name="sendfile" number="4207"/>
+ <syscall name="getpmsg" number="4208"/>
+ <syscall name="putpmsg" number="4209"/>
+ <syscall name="mmap2" number="4210"/>
+ <syscall name="truncate64" number="4211"/>
+ <syscall name="ftruncate64" number="4212"/>
+ <syscall name="stat64" number="4213"/>
+ <syscall name="lstat64" number="4214"/>
+ <syscall name="fstat64" number="4215"/>
+ <syscall name="pivot_root" number="4216"/>
+ <syscall name="mincore" number="4217"/>
+ <syscall name="madvise" number="4218"/>
+ <syscall name="getdents64" number="4219"/>
+ <syscall name="fcntl64" number="4220"/>
+ <syscall name="gettid" number="4222"/>
+ <syscall name="readahead" number="4223"/>
+ <syscall name="setxattr" number="4224"/>
+ <syscall name="lsetxattr" number="4225"/>
+ <syscall name="fsetxattr" number="4226"/>
+ <syscall name="getxattr" number="4227"/>
+ <syscall name="lgetxattr" number="4228"/>
+ <syscall name="fgetxattr" number="4229"/>
+ <syscall name="listxattr" number="4230"/>
+ <syscall name="llistxattr" number="4231"/>
+ <syscall name="flistxattr" number="4232"/>
+ <syscall name="removexattr" number="4233"/>
+ <syscall name="lremovexattr" number="4234"/>
+ <syscall name="fremovexattr" number="4235"/>
+ <syscall name="tkill" number="4236"/>
+ <syscall name="sendfile64" number="4237"/>
+ <syscall name="futex" number="4238"/>
+ <syscall name="sched_setaffinity" number="4239"/>
+ <syscall name="sched_getaffinity" number="4240"/>
+ <syscall name="io_setup" number="4241"/>
+ <syscall name="io_destroy" number="4242"/>
+ <syscall name="io_getevents" number="4243"/>
+ <syscall name="io_submit" number="4244"/>
+ <syscall name="io_cancel" number="4245"/>
+ <syscall name="exit_group" number="4246"/>
+ <syscall name="lookup_dcookie" number="4247"/>
+ <syscall name="epoll_create" number="4248"/>
+ <syscall name="epoll_ctl" number="4249"/>
+ <syscall name="epoll_wait" number="4250"/>
+ <syscall name="remap_file_pages" number="4251"/>
+ <syscall name="set_tid_address" number="4252"/>
+ <syscall name="restart_syscall" number="4253"/>
+ <syscall name="fadvise64" number="4254"/>
+ <syscall name="statfs64" number="4255"/>
+ <syscall name="fstatfs64" number="4256"/>
+ <syscall name="timer_create" number="4257"/>
+ <syscall name="timer_settime" number="4258"/>
+ <syscall name="timer_gettime" number="4259"/>
+ <syscall name="timer_getoverrun" number="4260"/>
+ <syscall name="timer_delete" number="4261"/>
+ <syscall name="clock_settime" number="4262"/>
+ <syscall name="clock_gettime" number="4263"/>
+ <syscall name="clock_getres" number="4264"/>
+ <syscall name="clock_nanosleep" number="4265"/>
+ <syscall name="tgkill" number="4266"/>
+ <syscall name="utimes" number="4267"/>
+ <syscall name="mbind" number="4268"/>
+ <syscall name="get_mempolicy" number="4269"/>
+ <syscall name="set_mempolicy" number="4270"/>
+ <syscall name="mq_open" number="4271"/>
+ <syscall name="mq_unlink" number="4272"/>
+ <syscall name="mq_timedsend" number="4273"/>
+ <syscall name="mq_timedreceive" number="4274"/>
+ <syscall name="mq_notify" number="4275"/>
+ <syscall name="mq_getsetattr" number="4276"/>
+ <syscall name="vserver" number="4277"/>
+ <syscall name="waitid" number="4278"/>
+ <syscall name="add_key" number="4280"/>
+ <syscall name="request_key" number="4281"/>
+ <syscall name="keyctl" number="4282"/>
+ <syscall name="set_thread_area" number="4283"/>
+ <syscall name="inotify_init" number="4284"/>
+ <syscall name="inotify_add_watch" number="4285"/>
+ <syscall name="inotify_rm_watch" number="4286"/>
+ <syscall name="migrate_pages" number="4287"/>
+ <syscall name="openat" number="4288"/>
+ <syscall name="mkdirat" number="4289"/>
+ <syscall name="mknodat" number="4290"/>
+ <syscall name="fchownat" number="4291"/>
+ <syscall name="futimesat" number="4292"/>
+ <syscall name="fstatat64" number="4293"/>
+ <syscall name="unlinkat" number="4294"/>
+ <syscall name="renameat" number="4295"/>
+ <syscall name="linkat" number="4296"/>
+ <syscall name="symlinkat" number="4297"/>
+ <syscall name="readlinkat" number="4298"/>
+ <syscall name="fchmodat" number="4299"/>
+ <syscall name="faccessat" number="4300"/>
+ <syscall name="pselect6" number="4301"/>
+ <syscall name="ppoll" number="4302"/>
+ <syscall name="unshare" number="4303"/>
+ <syscall name="splice" number="4304"/>
+ <syscall name="sync_file_range" number="4305"/>
+ <syscall name="tee" number="4306"/>
+ <syscall name="vmsplice" number="4307"/>
+ <syscall name="move_pages" number="4308"/>
+ <syscall name="set_robust_list" number="4309"/>
+ <syscall name="get_robust_list" number="4310"/>
+ <syscall name="kexec_load" number="4311"/>
+ <syscall name="getcpu" number="4312"/>
+ <syscall name="epoll_pwait" number="4313"/>
+ <syscall name="ioprio_set" number="4314"/>
+ <syscall name="ioprio_get" number="4315"/>
+ <syscall name="utimensat" number="4316"/>
+ <syscall name="signalfd" number="4317"/>
+ <syscall name="timerfd" number="4318"/>
+ <syscall name="eventfd" number="4319"/>
+ <syscall name="fallocate" number="4320"/>
+ <syscall name="timerfd_create" number="4321"/>
+ <syscall name="timerfd_gettime" number="4322"/>
+ <syscall name="timerfd_settime" number="4323"/>
+ <syscall name="signalfd4" number="4324"/>
+ <syscall name="eventfd2" number="4325"/>
+ <syscall name="epoll_create1" number="4326"/>
+ <syscall name="dup3" number="4327"/>
+ <syscall name="pipe2" number="4328"/>
+ <syscall name="inotify_init1" number="4329"/>
+ <syscall name="preadv" number="4330"/>
+ <syscall name="pwritev" number="4331"/>
+ <syscall name="rt_tgsigqueueinfo" number="4332"/>
+ <syscall name="perf_event_open" number="4333"/>
+ <syscall name="accept4" number="4334"/>
+ <syscall name="recvmmsg" number="4335"/>
+ <syscall name="fanotify_init" number="4336"/>
+ <syscall name="fanotify_mark" number="4337"/>
+ <syscall name="prlimit64" number="4338"/>
+</syscalls_info>
diff --git a/share/gdb/syscalls/ppc-linux.xml b/share/gdb/syscalls/ppc-linux.xml
new file mode 100644
index 0000000..dd4eba6
--- /dev/null
+++ b/share/gdb/syscalls/ppc-linux.xml
@@ -0,0 +1,310 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2009-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/powerpc/include/asm/unistd.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="restart_syscall" number="0"/>
+ <syscall name="exit" number="1"/>
+ <syscall name="fork" number="2"/>
+ <syscall name="read" number="3"/>
+ <syscall name="write" number="4"/>
+ <syscall name="open" number="5"/>
+ <syscall name="close" number="6"/>
+ <syscall name="waitpid" number="7"/>
+ <syscall name="creat" number="8"/>
+ <syscall name="link" number="9"/>
+ <syscall name="unlink" number="10"/>
+ <syscall name="execve" number="11"/>
+ <syscall name="chdir" number="12"/>
+ <syscall name="time" number="13"/>
+ <syscall name="mknod" number="14"/>
+ <syscall name="chmod" number="15"/>
+ <syscall name="lchown" number="16"/>
+ <syscall name="break" number="17"/>
+ <syscall name="oldstat" number="18"/>
+ <syscall name="lseek" number="19"/>
+ <syscall name="getpid" number="20"/>
+ <syscall name="mount" number="21"/>
+ <syscall name="umount" number="22"/>
+ <syscall name="setuid" number="23"/>
+ <syscall name="getuid" number="24"/>
+ <syscall name="stime" number="25"/>
+ <syscall name="ptrace" number="26"/>
+ <syscall name="alarm" number="27"/>
+ <syscall name="oldfstat" number="28"/>
+ <syscall name="pause" number="29"/>
+ <syscall name="utime" number="30"/>
+ <syscall name="stty" number="31"/>
+ <syscall name="gtty" number="32"/>
+ <syscall name="access" number="33"/>
+ <syscall name="nice" number="34"/>
+ <syscall name="ftime" number="35"/>
+ <syscall name="sync" number="36"/>
+ <syscall name="kill" number="37"/>
+ <syscall name="rename" number="38"/>
+ <syscall name="mkdir" number="39"/>
+ <syscall name="rmdir" number="40"/>
+ <syscall name="dup" number="41"/>
+ <syscall name="pipe" number="42"/>
+ <syscall name="times" number="43"/>
+ <syscall name="prof" number="44"/>
+ <syscall name="brk" number="45"/>
+ <syscall name="setgid" number="46"/>
+ <syscall name="getgid" number="47"/>
+ <syscall name="signal" number="48"/>
+ <syscall name="geteuid" number="49"/>
+ <syscall name="getegid" number="50"/>
+ <syscall name="acct" number="51"/>
+ <syscall name="umount2" number="52"/>
+ <syscall name="lock" number="53"/>
+ <syscall name="ioctl" number="54"/>
+ <syscall name="fcntl" number="55"/>
+ <syscall name="mpx" number="56"/>
+ <syscall name="setpgid" number="57"/>
+ <syscall name="ulimit" number="58"/>
+ <syscall name="oldolduname" number="59"/>
+ <syscall name="umask" number="60"/>
+ <syscall name="chroot" number="61"/>
+ <syscall name="ustat" number="62"/>
+ <syscall name="dup2" number="63"/>
+ <syscall name="getppid" number="64"/>
+ <syscall name="getpgrp" number="65"/>
+ <syscall name="setsid" number="66"/>
+ <syscall name="sigaction" number="67"/>
+ <syscall name="sgetmask" number="68"/>
+ <syscall name="ssetmask" number="69"/>
+ <syscall name="setreuid" number="70"/>
+ <syscall name="setregid" number="71"/>
+ <syscall name="sigsuspend" number="72"/>
+ <syscall name="sigpending" number="73"/>
+ <syscall name="sethostname" number="74"/>
+ <syscall name="setrlimit" number="75"/>
+ <syscall name="getrlimit" number="76"/>
+ <syscall name="getrusage" number="77"/>
+ <syscall name="gettimeofday" number="78"/>
+ <syscall name="settimeofday" number="79"/>
+ <syscall name="getgroups" number="80"/>
+ <syscall name="setgroups" number="81"/>
+ <syscall name="select" number="82"/>
+ <syscall name="symlink" number="83"/>
+ <syscall name="oldlstat" number="84"/>
+ <syscall name="readlink" number="85"/>
+ <syscall name="uselib" number="86"/>
+ <syscall name="swapon" number="87"/>
+ <syscall name="reboot" number="88"/>
+ <syscall name="readdir" number="89"/>
+ <syscall name="mmap" number="90"/>
+ <syscall name="munmap" number="91"/>
+ <syscall name="truncate" number="92"/>
+ <syscall name="ftruncate" number="93"/>
+ <syscall name="fchmod" number="94"/>
+ <syscall name="fchown" number="95"/>
+ <syscall name="getpriority" number="96"/>
+ <syscall name="setpriority" number="97"/>
+ <syscall name="profil" number="98"/>
+ <syscall name="statfs" number="99"/>
+ <syscall name="fstatfs" number="100"/>
+ <syscall name="ioperm" number="101"/>
+ <syscall name="socketcall" number="102"/>
+ <syscall name="syslog" number="103"/>
+ <syscall name="setitimer" number="104"/>
+ <syscall name="getitimer" number="105"/>
+ <syscall name="stat" number="106"/>
+ <syscall name="lstat" number="107"/>
+ <syscall name="fstat" number="108"/>
+ <syscall name="olduname" number="109"/>
+ <syscall name="iopl" number="110"/>
+ <syscall name="vhangup" number="111"/>
+ <syscall name="idle" number="112"/>
+ <syscall name="vm86" number="113"/>
+ <syscall name="wait4" number="114"/>
+ <syscall name="swapoff" number="115"/>
+ <syscall name="sysinfo" number="116"/>
+ <syscall name="ipc" number="117"/>
+ <syscall name="fsync" number="118"/>
+ <syscall name="sigreturn" number="119"/>
+ <syscall name="clone" number="120"/>
+ <syscall name="setdomainname" number="121"/>
+ <syscall name="uname" number="122"/>
+ <syscall name="modify_ldt" number="123"/>
+ <syscall name="adjtimex" number="124"/>
+ <syscall name="mprotect" number="125"/>
+ <syscall name="sigprocmask" number="126"/>
+ <syscall name="create_module" number="127"/>
+ <syscall name="init_module" number="128"/>
+ <syscall name="delete_module" number="129"/>
+ <syscall name="get_kernel_syms" number="130"/>
+ <syscall name="quotactl" number="131"/>
+ <syscall name="getpgid" number="132"/>
+ <syscall name="fchdir" number="133"/>
+ <syscall name="bdflush" number="134"/>
+ <syscall name="sysfs" number="135"/>
+ <syscall name="personality" number="136"/>
+ <syscall name="afs_syscall" number="137"/>
+ <syscall name="setfsuid" number="138"/>
+ <syscall name="setfsgid" number="139"/>
+ <syscall name="_llseek" number="140"/>
+ <syscall name="getdents" number="141"/>
+ <syscall name="_newselect" number="142"/>
+ <syscall name="flock" number="143"/>
+ <syscall name="msync" number="144"/>
+ <syscall name="readv" number="145"/>
+ <syscall name="writev" number="146"/>
+ <syscall name="getsid" number="147"/>
+ <syscall name="fdatasync" number="148"/>
+ <syscall name="_sysctl" number="149"/>
+ <syscall name="mlock" number="150"/>
+ <syscall name="munlock" number="151"/>
+ <syscall name="mlockall" number="152"/>
+ <syscall name="munlockall" number="153"/>
+ <syscall name="sched_setparam" number="154"/>
+ <syscall name="sched_getparam" number="155"/>
+ <syscall name="sched_setscheduler" number="156"/>
+ <syscall name="sched_getscheduler" number="157"/>
+ <syscall name="sched_yield" number="158"/>
+ <syscall name="sched_get_priority_max" number="159"/>
+ <syscall name="sched_get_priority_min" number="160"/>
+ <syscall name="sched_rr_get_interval" number="161"/>
+ <syscall name="nanosleep" number="162"/>
+ <syscall name="mremap" number="163"/>
+ <syscall name="setresuid" number="164"/>
+ <syscall name="getresuid" number="165"/>
+ <syscall name="query_module" number="166"/>
+ <syscall name="poll" number="167"/>
+ <syscall name="nfsservctl" number="168"/>
+ <syscall name="setresgid" number="169"/>
+ <syscall name="getresgid" number="170"/>
+ <syscall name="prctl" number="171"/>
+ <syscall name="rt_sigreturn" number="172"/>
+ <syscall name="rt_sigaction" number="173"/>
+ <syscall name="rt_sigprocmask" number="174"/>
+ <syscall name="rt_sigpending" number="175"/>
+ <syscall name="rt_sigtimedwait" number="176"/>
+ <syscall name="rt_sigqueueinfo" number="177"/>
+ <syscall name="rt_sigsuspend" number="178"/>
+ <syscall name="pread64" number="179"/>
+ <syscall name="pwrite64" number="180"/>
+ <syscall name="chown" number="181"/>
+ <syscall name="getcwd" number="182"/>
+ <syscall name="capget" number="183"/>
+ <syscall name="capset" number="184"/>
+ <syscall name="sigaltstack" number="185"/>
+ <syscall name="sendfile" number="186"/>
+ <syscall name="getpmsg" number="187"/>
+ <syscall name="putpmsg" number="188"/>
+ <syscall name="vfork" number="189"/>
+ <syscall name="ugetrlimit" number="190"/>
+ <syscall name="readahead" number="191"/>
+ <syscall name="mmap2" number="192"/>
+ <syscall name="truncate64" number="193"/>
+ <syscall name="ftruncate64" number="194"/>
+ <syscall name="stat64" number="195"/>
+ <syscall name="lstat64" number="196"/>
+ <syscall name="fstat64" number="197"/>
+ <syscall name="pciconfig_read" number="198"/>
+ <syscall name="pciconfig_write" number="199"/>
+ <syscall name="pciconfig_iobase" number="200"/>
+ <syscall name="multiplexer" number="201"/>
+ <syscall name="getdents64" number="202"/>
+ <syscall name="pivot_root" number="203"/>
+ <syscall name="fcntl64" number="204"/>
+ <syscall name="madvise" number="205"/>
+ <syscall name="mincore" number="206"/>
+ <syscall name="gettid" number="207"/>
+ <syscall name="tkill" number="208"/>
+ <syscall name="setxattr" number="209"/>
+ <syscall name="lsetxattr" number="210"/>
+ <syscall name="fsetxattr" number="211"/>
+ <syscall name="getxattr" number="212"/>
+ <syscall name="lgetxattr" number="213"/>
+ <syscall name="fgetxattr" number="214"/>
+ <syscall name="listxattr" number="215"/>
+ <syscall name="llistxattr" number="216"/>
+ <syscall name="flistxattr" number="217"/>
+ <syscall name="removexattr" number="218"/>
+ <syscall name="lremovexattr" number="219"/>
+ <syscall name="fremovexattr" number="220"/>
+ <syscall name="futex" number="221"/>
+ <syscall name="sched_setaffinity" number="222"/>
+ <syscall name="sched_getaffinity" number="223"/>
+ <syscall name="tuxcall" number="225"/>
+ <syscall name="sendfile64" number="226"/>
+ <syscall name="io_setup" number="227"/>
+ <syscall name="io_destroy" number="228"/>
+ <syscall name="io_getevents" number="229"/>
+ <syscall name="io_submit" number="230"/>
+ <syscall name="io_cancel" number="231"/>
+ <syscall name="set_tid_address" number="232"/>
+ <syscall name="fadvise64" number="233"/>
+ <syscall name="exit_group" number="234"/>
+ <syscall name="lookup_dcookie" number="235"/>
+ <syscall name="epoll_create" number="236"/>
+ <syscall name="epoll_ctl" number="237"/>
+ <syscall name="epoll_wait" number="238"/>
+ <syscall name="remap_file_pages" number="239"/>
+ <syscall name="timer_create" number="240"/>
+ <syscall name="timer_settime" number="241"/>
+ <syscall name="timer_gettime" number="242"/>
+ <syscall name="timer_getoverrun" number="243"/>
+ <syscall name="timer_delete" number="244"/>
+ <syscall name="clock_settime" number="245"/>
+ <syscall name="clock_gettime" number="246"/>
+ <syscall name="clock_getres" number="247"/>
+ <syscall name="clock_nanosleep" number="248"/>
+ <syscall name="swapcontext" number="249"/>
+ <syscall name="tgkill" number="250"/>
+ <syscall name="utimes" number="251"/>
+ <syscall name="statfs64" number="252"/>
+ <syscall name="fstatfs64" number="253"/>
+ <syscall name="fadvise64_64" number="254"/>
+ <syscall name="rtas" number="255"/>
+ <syscall name="sys_debug_setcontext" number="256"/>
+ <syscall name="mbind" number="259"/>
+ <syscall name="get_mempolicy" number="260"/>
+ <syscall name="set_mempolicy" number="261"/>
+ <syscall name="mq_open" number="262"/>
+ <syscall name="mq_unlink" number="263"/>
+ <syscall name="mq_timedsend" number="264"/>
+ <syscall name="mq_timedreceive" number="265"/>
+ <syscall name="mq_notify" number="266"/>
+ <syscall name="mq_getsetattr" number="267"/>
+ <syscall name="kexec_load" number="268"/>
+ <syscall name="add_key" number="269"/>
+ <syscall name="request_key" number="270"/>
+ <syscall name="keyctl" number="271"/>
+ <syscall name="waitid" number="272"/>
+ <syscall name="ioprio_set" number="273"/>
+ <syscall name="ioprio_get" number="274"/>
+ <syscall name="inotify_init" number="275"/>
+ <syscall name="inotify_add_watch" number="276"/>
+ <syscall name="inotify_rm_watch" number="277"/>
+ <syscall name="spu_run" number="278"/>
+ <syscall name="spu_create" number="279"/>
+ <syscall name="pselect6" number="280"/>
+ <syscall name="ppoll" number="281"/>
+ <syscall name="unshare" number="282"/>
+ <syscall name="openat" number="286"/>
+ <syscall name="mkdirat" number="287"/>
+ <syscall name="mknodat" number="288"/>
+ <syscall name="fchownat" number="289"/>
+ <syscall name="futimesat" number="290"/>
+ <syscall name="fstatat64" number="291"/>
+ <syscall name="unlinkat" number="292"/>
+ <syscall name="renameat" number="293"/>
+ <syscall name="linkat" number="294"/>
+ <syscall name="symlinkat" number="295"/>
+ <syscall name="readlinkat" number="296"/>
+ <syscall name="fchmodat" number="297"/>
+ <syscall name="faccessat" number="298"/>
+</syscalls_info>
diff --git a/share/gdb/syscalls/ppc64-linux.xml b/share/gdb/syscalls/ppc64-linux.xml
new file mode 100644
index 0000000..ad56db1
--- /dev/null
+++ b/share/gdb/syscalls/ppc64-linux.xml
@@ -0,0 +1,295 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2009-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/powerpc/include/asm/unistd.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="restart_syscall" number="0"/>
+ <syscall name="exit" number="1"/>
+ <syscall name="fork" number="2"/>
+ <syscall name="read" number="3"/>
+ <syscall name="write" number="4"/>
+ <syscall name="open" number="5"/>
+ <syscall name="close" number="6"/>
+ <syscall name="waitpid" number="7"/>
+ <syscall name="creat" number="8"/>
+ <syscall name="link" number="9"/>
+ <syscall name="unlink" number="10"/>
+ <syscall name="execve" number="11"/>
+ <syscall name="chdir" number="12"/>
+ <syscall name="time" number="13"/>
+ <syscall name="mknod" number="14"/>
+ <syscall name="chmod" number="15"/>
+ <syscall name="lchown" number="16"/>
+ <syscall name="break" number="17"/>
+ <syscall name="oldstat" number="18"/>
+ <syscall name="lseek" number="19"/>
+ <syscall name="getpid" number="20"/>
+ <syscall name="mount" number="21"/>
+ <syscall name="umount" number="22"/>
+ <syscall name="setuid" number="23"/>
+ <syscall name="getuid" number="24"/>
+ <syscall name="stime" number="25"/>
+ <syscall name="ptrace" number="26"/>
+ <syscall name="alarm" number="27"/>
+ <syscall name="oldfstat" number="28"/>
+ <syscall name="pause" number="29"/>
+ <syscall name="utime" number="30"/>
+ <syscall name="stty" number="31"/>
+ <syscall name="gtty" number="32"/>
+ <syscall name="access" number="33"/>
+ <syscall name="nice" number="34"/>
+ <syscall name="ftime" number="35"/>
+ <syscall name="sync" number="36"/>
+ <syscall name="kill" number="37"/>
+ <syscall name="rename" number="38"/>
+ <syscall name="mkdir" number="39"/>
+ <syscall name="rmdir" number="40"/>
+ <syscall name="dup" number="41"/>
+ <syscall name="pipe" number="42"/>
+ <syscall name="times" number="43"/>
+ <syscall name="prof" number="44"/>
+ <syscall name="brk" number="45"/>
+ <syscall name="setgid" number="46"/>
+ <syscall name="getgid" number="47"/>
+ <syscall name="signal" number="48"/>
+ <syscall name="geteuid" number="49"/>
+ <syscall name="getegid" number="50"/>
+ <syscall name="acct" number="51"/>
+ <syscall name="umount2" number="52"/>
+ <syscall name="lock" number="53"/>
+ <syscall name="ioctl" number="54"/>
+ <syscall name="fcntl" number="55"/>
+ <syscall name="mpx" number="56"/>
+ <syscall name="setpgid" number="57"/>
+ <syscall name="ulimit" number="58"/>
+ <syscall name="oldolduname" number="59"/>
+ <syscall name="umask" number="60"/>
+ <syscall name="chroot" number="61"/>
+ <syscall name="ustat" number="62"/>
+ <syscall name="dup2" number="63"/>
+ <syscall name="getppid" number="64"/>
+ <syscall name="getpgrp" number="65"/>
+ <syscall name="setsid" number="66"/>
+ <syscall name="sigaction" number="67"/>
+ <syscall name="sgetmask" number="68"/>
+ <syscall name="ssetmask" number="69"/>
+ <syscall name="setreuid" number="70"/>
+ <syscall name="setregid" number="71"/>
+ <syscall name="sigsuspend" number="72"/>
+ <syscall name="sigpending" number="73"/>
+ <syscall name="sethostname" number="74"/>
+ <syscall name="setrlimit" number="75"/>
+ <syscall name="getrlimit" number="76"/>
+ <syscall name="getrusage" number="77"/>
+ <syscall name="gettimeofday" number="78"/>
+ <syscall name="settimeofday" number="79"/>
+ <syscall name="getgroups" number="80"/>
+ <syscall name="setgroups" number="81"/>
+ <syscall name="select" number="82"/>
+ <syscall name="symlink" number="83"/>
+ <syscall name="oldlstat" number="84"/>
+ <syscall name="readlink" number="85"/>
+ <syscall name="uselib" number="86"/>
+ <syscall name="swapon" number="87"/>
+ <syscall name="reboot" number="88"/>
+ <syscall name="readdir" number="89"/>
+ <syscall name="mmap" number="90"/>
+ <syscall name="munmap" number="91"/>
+ <syscall name="truncate" number="92"/>
+ <syscall name="ftruncate" number="93"/>
+ <syscall name="fchmod" number="94"/>
+ <syscall name="fchown" number="95"/>
+ <syscall name="getpriority" number="96"/>
+ <syscall name="setpriority" number="97"/>
+ <syscall name="profil" number="98"/>
+ <syscall name="statfs" number="99"/>
+ <syscall name="fstatfs" number="100"/>
+ <syscall name="ioperm" number="101"/>
+ <syscall name="socketcall" number="102"/>
+ <syscall name="syslog" number="103"/>
+ <syscall name="setitimer" number="104"/>
+ <syscall name="getitimer" number="105"/>
+ <syscall name="stat" number="106"/>
+ <syscall name="lstat" number="107"/>
+ <syscall name="fstat" number="108"/>
+ <syscall name="olduname" number="109"/>
+ <syscall name="iopl" number="110"/>
+ <syscall name="vhangup" number="111"/>
+ <syscall name="idle" number="112"/>
+ <syscall name="vm86" number="113"/>
+ <syscall name="wait4" number="114"/>
+ <syscall name="swapoff" number="115"/>
+ <syscall name="sysinfo" number="116"/>
+ <syscall name="ipc" number="117"/>
+ <syscall name="fsync" number="118"/>
+ <syscall name="sigreturn" number="119"/>
+ <syscall name="clone" number="120"/>
+ <syscall name="setdomainname" number="121"/>
+ <syscall name="uname" number="122"/>
+ <syscall name="modify_ldt" number="123"/>
+ <syscall name="adjtimex" number="124"/>
+ <syscall name="mprotect" number="125"/>
+ <syscall name="sigprocmask" number="126"/>
+ <syscall name="create_module" number="127"/>
+ <syscall name="init_module" number="128"/>
+ <syscall name="delete_module" number="129"/>
+ <syscall name="get_kernel_syms" number="130"/>
+ <syscall name="quotactl" number="131"/>
+ <syscall name="getpgid" number="132"/>
+ <syscall name="fchdir" number="133"/>
+ <syscall name="bdflush" number="134"/>
+ <syscall name="sysfs" number="135"/>
+ <syscall name="personality" number="136"/>
+ <syscall name="afs_syscall" number="137"/>
+ <syscall name="setfsuid" number="138"/>
+ <syscall name="setfsgid" number="139"/>
+ <syscall name="_llseek" number="140"/>
+ <syscall name="getdents" number="141"/>
+ <syscall name="_newselect" number="142"/>
+ <syscall name="flock" number="143"/>
+ <syscall name="msync" number="144"/>
+ <syscall name="readv" number="145"/>
+ <syscall name="writev" number="146"/>
+ <syscall name="getsid" number="147"/>
+ <syscall name="fdatasync" number="148"/>
+ <syscall name="_sysctl" number="149"/>
+ <syscall name="mlock" number="150"/>
+ <syscall name="munlock" number="151"/>
+ <syscall name="mlockall" number="152"/>
+ <syscall name="munlockall" number="153"/>
+ <syscall name="sched_setparam" number="154"/>
+ <syscall name="sched_getparam" number="155"/>
+ <syscall name="sched_setscheduler" number="156"/>
+ <syscall name="sched_getscheduler" number="157"/>
+ <syscall name="sched_yield" number="158"/>
+ <syscall name="sched_get_priority_max" number="159"/>
+ <syscall name="sched_get_priority_min" number="160"/>
+ <syscall name="sched_rr_get_interval" number="161"/>
+ <syscall name="nanosleep" number="162"/>
+ <syscall name="mremap" number="163"/>
+ <syscall name="setresuid" number="164"/>
+ <syscall name="getresuid" number="165"/>
+ <syscall name="query_module" number="166"/>
+ <syscall name="poll" number="167"/>
+ <syscall name="nfsservctl" number="168"/>
+ <syscall name="setresgid" number="169"/>
+ <syscall name="getresgid" number="170"/>
+ <syscall name="prctl" number="171"/>
+ <syscall name="rt_sigreturn" number="172"/>
+ <syscall name="rt_sigaction" number="173"/>
+ <syscall name="rt_sigprocmask" number="174"/>
+ <syscall name="rt_sigpending" number="175"/>
+ <syscall name="rt_sigtimedwait" number="176"/>
+ <syscall name="rt_sigqueueinfo" number="177"/>
+ <syscall name="rt_sigsuspend" number="178"/>
+ <syscall name="pread64" number="179"/>
+ <syscall name="pwrite64" number="180"/>
+ <syscall name="chown" number="181"/>
+ <syscall name="getcwd" number="182"/>
+ <syscall name="capget" number="183"/>
+ <syscall name="capset" number="184"/>
+ <syscall name="sigaltstack" number="185"/>
+ <syscall name="sendfile" number="186"/>
+ <syscall name="getpmsg" number="187"/>
+ <syscall name="putpmsg" number="188"/>
+ <syscall name="vfork" number="189"/>
+ <syscall name="ugetrlimit" number="190"/>
+ <syscall name="readahead" number="191"/>
+ <syscall name="pciconfig_read" number="198"/>
+ <syscall name="pciconfig_write" number="199"/>
+ <syscall name="pciconfig_iobase" number="200"/>
+ <syscall name="multiplexer" number="201"/>
+ <syscall name="getdents64" number="202"/>
+ <syscall name="pivot_root" number="203"/>
+ <syscall name="madvise" number="205"/>
+ <syscall name="mincore" number="206"/>
+ <syscall name="gettid" number="207"/>
+ <syscall name="tkill" number="208"/>
+ <syscall name="setxattr" number="209"/>
+ <syscall name="lsetxattr" number="210"/>
+ <syscall name="fsetxattr" number="211"/>
+ <syscall name="getxattr" number="212"/>
+ <syscall name="lgetxattr" number="213"/>
+ <syscall name="fgetxattr" number="214"/>
+ <syscall name="listxattr" number="215"/>
+ <syscall name="llistxattr" number="216"/>
+ <syscall name="flistxattr" number="217"/>
+ <syscall name="removexattr" number="218"/>
+ <syscall name="lremovexattr" number="219"/>
+ <syscall name="fremovexattr" number="220"/>
+ <syscall name="futex" number="221"/>
+ <syscall name="sched_setaffinity" number="222"/>
+ <syscall name="sched_getaffinity" number="223"/>
+ <syscall name="tuxcall" number="225"/>
+ <syscall name="io_setup" number="227"/>
+ <syscall name="io_destroy" number="228"/>
+ <syscall name="io_getevents" number="229"/>
+ <syscall name="io_submit" number="230"/>
+ <syscall name="io_cancel" number="231"/>
+ <syscall name="set_tid_address" number="232"/>
+ <syscall name="fadvise64" number="233"/>
+ <syscall name="exit_group" number="234"/>
+ <syscall name="lookup_dcookie" number="235"/>
+ <syscall name="epoll_create" number="236"/>
+ <syscall name="epoll_ctl" number="237"/>
+ <syscall name="epoll_wait" number="238"/>
+ <syscall name="remap_file_pages" number="239"/>
+ <syscall name="timer_create" number="240"/>
+ <syscall name="timer_settime" number="241"/>
+ <syscall name="timer_gettime" number="242"/>
+ <syscall name="timer_getoverrun" number="243"/>
+ <syscall name="timer_delete" number="244"/>
+ <syscall name="clock_settime" number="245"/>
+ <syscall name="clock_gettime" number="246"/>
+ <syscall name="clock_getres" number="247"/>
+ <syscall name="clock_nanosleep" number="248"/>
+ <syscall name="swapcontext" number="249"/>
+ <syscall name="tgkill" number="250"/>
+ <syscall name="utimes" number="251"/>
+ <syscall name="statfs64" number="252"/>
+ <syscall name="fstatfs64" number="253"/>
+ <syscall name="rtas" number="255"/>
+ <syscall name="sys_debug_setcontext" number="256"/>
+ <syscall name="mbind" number="259"/>
+ <syscall name="get_mempolicy" number="260"/>
+ <syscall name="set_mempolicy" number="261"/>
+ <syscall name="mq_open" number="262"/>
+ <syscall name="mq_unlink" number="263"/>
+ <syscall name="mq_timedsend" number="264"/>
+ <syscall name="mq_timedreceive" number="265"/>
+ <syscall name="mq_notify" number="266"/>
+ <syscall name="mq_getsetattr" number="267"/>
+ <syscall name="kexec_load" number="268"/>
+ <syscall name="add_key" number="269"/>
+ <syscall name="request_key" number="270"/>
+ <syscall name="keyctl" number="271"/>
+ <syscall name="waitid" number="272"/>
+ <syscall name="ioprio_set" number="273"/>
+ <syscall name="ioprio_get" number="274"/>
+ <syscall name="inotify_init" number="275"/>
+ <syscall name="inotify_add_watch" number="276"/>
+ <syscall name="inotify_rm_watch" number="277"/>
+ <syscall name="spu_run" number="278"/>
+ <syscall name="spu_create" number="279"/>
+ <syscall name="pselect6" number="280"/>
+ <syscall name="ppoll" number="281"/>
+ <syscall name="unshare" number="282"/>
+ <syscall name="unlinkat" number="286"/>
+ <syscall name="renameat" number="287"/>
+ <syscall name="linkat" number="288"/>
+ <syscall name="symlinkat" number="289"/>
+ <syscall name="readlinkat" number="290"/>
+ <syscall name="fchmodat" number="291"/>
+ <syscall name="faccessat" number="292"/>
+</syscalls_info>
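(Editor's aside, not part of the patch: each table carries a header comment saying it was generated from that architecture's unistd.h. The exact generator used upstream is not shown in this commit, and newer kernels define syscalls in *.tbl files instead, so the following is only a hedged sketch of scraping the classic plain "#define __NR_name number" layout seen in the powerpc header; MIPS adds an ABI base offset on top of this, which the sketch does not handle. The output format mirrors the <syscall .../> elements above.)

    # Sketch: scrape "#define __NR_name  number" lines from a kernel unistd.h
    # and emit them in the same XML element form used by these tables.
    import re, sys

    PATTERN = re.compile(r"#define\s+__NR_(\w+)\s+(\d+)")

    def scrape(path):
        table = {}
        with open(path) as f:
            for line in f:
                m = PATTERN.search(line)
                if m:
                    table[m.group(1)] = int(m.group(2))
        return table

    if __name__ == "__main__":
        for name, number in sorted(scrape(sys.argv[1]).items(), key=lambda kv: kv[1]):
            print('  <syscall name="%s" number="%d"/>' % (name, number))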
diff --git a/share/gdb/syscalls/sparc-linux.xml b/share/gdb/syscalls/sparc-linux.xml
new file mode 100644
index 0000000..7673621
--- /dev/null
+++ b/share/gdb/syscalls/sparc-linux.xml
@@ -0,0 +1,344 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/sparc/include/asm/unistd.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="restart_syscall" number="0"/>
+ <syscall name="exit" number="1"/>
+ <syscall name="fork" number="2"/>
+ <syscall name="read" number="3"/>
+ <syscall name="write" number="4"/>
+ <syscall name="open" number="5"/>
+ <syscall name="close" number="6"/>
+ <syscall name="wait4" number="7"/>
+ <syscall name="creat" number="8"/>
+ <syscall name="link" number="9"/>
+ <syscall name="unlink" number="10"/>
+ <syscall name="execv" number="11"/>
+ <syscall name="chdir" number="12"/>
+ <syscall name="chown" number="13"/>
+ <syscall name="mknod" number="14"/>
+ <syscall name="chmod" number="15"/>
+ <syscall name="lchown" number="16"/>
+ <syscall name="brk" number="17"/>
+ <syscall name="perfctr" number="18"/>
+ <syscall name="lseek" number="19"/>
+ <syscall name="getpid" number="20"/>
+ <syscall name="capget" number="21"/>
+ <syscall name="capset" number="22"/>
+ <syscall name="setuid" number="23"/>
+ <syscall name="getuid" number="24"/>
+ <syscall name="vmsplice" number="25"/>
+ <syscall name="ptrace" number="26"/>
+ <syscall name="alarm" number="27"/>
+ <syscall name="sigaltstack" number="28"/>
+ <syscall name="pause" number="29"/>
+ <syscall name="utime" number="30"/>
+ <syscall name="lchown32" number="31"/>
+ <syscall name="fchown32" number="32"/>
+ <syscall name="access" number="33"/>
+ <syscall name="nice" number="34"/>
+ <syscall name="chown32" number="35"/>
+ <syscall name="sync" number="36"/>
+ <syscall name="kill" number="37"/>
+ <syscall name="stat" number="38"/>
+ <syscall name="sendfile" number="39"/>
+ <syscall name="lstat" number="40"/>
+ <syscall name="dup" number="41"/>
+ <syscall name="pipe" number="42"/>
+ <syscall name="times" number="43"/>
+ <syscall name="getuid32" number="44"/>
+ <syscall name="umount2" number="45"/>
+ <syscall name="setgid" number="46"/>
+ <syscall name="getgid" number="47"/>
+ <syscall name="signal" number="48"/>
+ <syscall name="geteuid" number="49"/>
+ <syscall name="getegid" number="50"/>
+ <syscall name="acct" number="51"/>
+ <syscall name="getgid32" number="53"/>
+ <syscall name="ioctl" number="54"/>
+ <syscall name="reboot" number="55"/>
+ <syscall name="mmap2" number="56"/>
+ <syscall name="symlink" number="57"/>
+ <syscall name="readlink" number="58"/>
+ <syscall name="execve" number="59"/>
+ <syscall name="umask" number="60"/>
+ <syscall name="chroot" number="61"/>
+ <syscall name="fstat" number="62"/>
+ <syscall name="fstat64" number="63"/>
+ <syscall name="getpagesize" number="64"/>
+ <syscall name="msync" number="65"/>
+ <syscall name="vfork" number="66"/>
+ <syscall name="pread64" number="67"/>
+ <syscall name="pwrite64" number="68"/>
+ <syscall name="geteuid32" number="69"/>
+ <syscall name="getegid32" number="70"/>
+ <syscall name="mmap" number="71"/>
+ <syscall name="setreuid32" number="72"/>
+ <syscall name="munmap" number="73"/>
+ <syscall name="mprotect" number="74"/>
+ <syscall name="madvise" number="75"/>
+ <syscall name="vhangup" number="76"/>
+ <syscall name="truncate64" number="77"/>
+ <syscall name="mincore" number="78"/>
+ <syscall name="getgroups" number="79"/>
+ <syscall name="setgroups" number="80"/>
+ <syscall name="getpgrp" number="81"/>
+ <syscall name="setgroups32" number="82"/>
+ <syscall name="setitimer" number="83"/>
+ <syscall name="ftruncate64" number="84"/>
+ <syscall name="swapon" number="85"/>
+ <syscall name="getitimer" number="86"/>
+ <syscall name="setuid32" number="87"/>
+ <syscall name="sethostname" number="88"/>
+ <syscall name="setgid32" number="89"/>
+ <syscall name="dup2" number="90"/>
+ <syscall name="setfsuid32" number="91"/>
+ <syscall name="fcntl" number="92"/>
+ <syscall name="select" number="93"/>
+ <syscall name="setfsgid32" number="94"/>
+ <syscall name="fsync" number="95"/>
+ <syscall name="setpriority" number="96"/>
+ <syscall name="socket" number="97"/>
+ <syscall name="connect" number="98"/>
+ <syscall name="accept" number="99"/>
+ <syscall name="getpriority" number="100"/>
+ <syscall name="rt_sigreturn" number="101"/>
+ <syscall name="rt_sigaction" number="102"/>
+ <syscall name="rt_sigprocmask" number="103"/>
+ <syscall name="rt_sigpending" number="104"/>
+ <syscall name="rt_sigtimedwait" number="105"/>
+ <syscall name="rt_sigqueueinfo" number="106"/>
+ <syscall name="rt_sigsuspend" number="107"/>
+ <syscall name="setresuid32" number="108"/>
+ <syscall name="getresuid32" number="109"/>
+ <syscall name="setresgid32" number="110"/>
+ <syscall name="getresgid32" number="111"/>
+ <syscall name="setregid32" number="112"/>
+ <syscall name="recvmsg" number="113"/>
+ <syscall name="sendmsg" number="114"/>
+ <syscall name="getgroups32" number="115"/>
+ <syscall name="gettimeofday" number="116"/>
+ <syscall name="getrusage" number="117"/>
+ <syscall name="getsockopt" number="118"/>
+ <syscall name="getcwd" number="119"/>
+ <syscall name="readv" number="120"/>
+ <syscall name="writev" number="121"/>
+ <syscall name="settimeofday" number="122"/>
+ <syscall name="fchown" number="123"/>
+ <syscall name="fchmod" number="124"/>
+ <syscall name="recvfrom" number="125"/>
+ <syscall name="setreuid" number="126"/>
+ <syscall name="setregid" number="127"/>
+ <syscall name="rename" number="128"/>
+ <syscall name="truncate" number="129"/>
+ <syscall name="ftruncate" number="130"/>
+ <syscall name="flock" number="131"/>
+ <syscall name="lstat64" number="132"/>
+ <syscall name="sendto" number="133"/>
+ <syscall name="shutdown" number="134"/>
+ <syscall name="socketpair" number="135"/>
+ <syscall name="mkdir" number="136"/>
+ <syscall name="rmdir" number="137"/>
+ <syscall name="utimes" number="138"/>
+ <syscall name="stat64" number="139"/>
+ <syscall name="sendfile64" number="140"/>
+ <syscall name="getpeername" number="141"/>
+ <syscall name="futex" number="142"/>
+ <syscall name="gettid" number="143"/>
+ <syscall name="getrlimit" number="144"/>
+ <syscall name="setrlimit" number="145"/>
+ <syscall name="pivot_root" number="146"/>
+ <syscall name="prctl" number="147"/>
+ <syscall name="pciconfig_read" number="148"/>
+ <syscall name="pciconfig_write" number="149"/>
+ <syscall name="getsockname" number="150"/>
+ <syscall name="inotify_init" number="151"/>
+ <syscall name="inotify_add_watch" number="152"/>
+ <syscall name="poll" number="153"/>
+ <syscall name="getdents64" number="154"/>
+ <syscall name="fcntl64" number="155"/>
+ <syscall name="inotify_rm_watch" number="156"/>
+ <syscall name="statfs" number="157"/>
+ <syscall name="fstatfs" number="158"/>
+ <syscall name="umount" number="159"/>
+ <syscall name="sched_set_affinity" number="160"/>
+ <syscall name="sched_get_affinity" number="161"/>
+ <syscall name="getdomainname" number="162"/>
+ <syscall name="setdomainname" number="163"/>
+ <syscall name="quotactl" number="165"/>
+ <syscall name="set_tid_address" number="166"/>
+ <syscall name="mount" number="167"/>
+ <syscall name="ustat" number="168"/>
+ <syscall name="setxattr" number="169"/>
+ <syscall name="lsetxattr" number="170"/>
+ <syscall name="fsetxattr" number="171"/>
+ <syscall name="getxattr" number="172"/>
+ <syscall name="lgetxattr" number="173"/>
+ <syscall name="getdents" number="174"/>
+ <syscall name="setsid" number="175"/>
+ <syscall name="fchdir" number="176"/>
+ <syscall name="fgetxattr" number="177"/>
+ <syscall name="listxattr" number="178"/>
+ <syscall name="llistxattr" number="179"/>
+ <syscall name="flistxattr" number="180"/>
+ <syscall name="removexattr" number="181"/>
+ <syscall name="lremovexattr" number="182"/>
+ <syscall name="sigpending" number="183"/>
+ <syscall name="query_module" number="184"/>
+ <syscall name="setpgid" number="185"/>
+ <syscall name="fremovexattr" number="186"/>
+ <syscall name="tkill" number="187"/>
+ <syscall name="exit_group" number="188"/>
+ <syscall name="uname" number="189"/>
+ <syscall name="init_module" number="190"/>
+ <syscall name="personality" number="191"/>
+ <syscall name="remap_file_pages" number="192"/>
+ <syscall name="epoll_create" number="193"/>
+ <syscall name="epoll_ctl" number="194"/>
+ <syscall name="epoll_wait" number="195"/>
+ <syscall name="ioprio_set" number="196"/>
+ <syscall name="getppid" number="197"/>
+ <syscall name="sigaction" number="198"/>
+ <syscall name="sgetmask" number="199"/>
+ <syscall name="ssetmask" number="200"/>
+ <syscall name="sigsuspend" number="201"/>
+ <syscall name="oldlstat" number="202"/>
+ <syscall name="uselib" number="203"/>
+ <syscall name="readdir" number="204"/>
+ <syscall name="readahead" number="205"/>
+ <syscall name="socketcall" number="206"/>
+ <syscall name="syslog" number="207"/>
+ <syscall name="lookup_dcookie" number="208"/>
+ <syscall name="fadvise64" number="209"/>
+ <syscall name="fadvise64_64" number="210"/>
+ <syscall name="tgkill" number="211"/>
+ <syscall name="waitpid" number="212"/>
+ <syscall name="swapoff" number="213"/>
+ <syscall name="sysinfo" number="214"/>
+ <syscall name="ipc" number="215"/>
+ <syscall name="sigreturn" number="216"/>
+ <syscall name="clone" number="217"/>
+ <syscall name="ioprio_get" number="218"/>
+ <syscall name="adjtimex" number="219"/>
+ <syscall name="sigprocmask" number="220"/>
+ <syscall name="create_module" number="221"/>
+ <syscall name="delete_module" number="222"/>
+ <syscall name="get_kernel_syms" number="223"/>
+ <syscall name="getpgid" number="224"/>
+ <syscall name="bdflush" number="225"/>
+ <syscall name="sysfs" number="226"/>
+ <syscall name="afs_syscall" number="227"/>
+ <syscall name="setfsuid" number="228"/>
+ <syscall name="setfsgid" number="229"/>
+ <syscall name="_newselect" number="230"/>
+ <syscall name="time" number="231"/>
+ <syscall name="splice" number="232"/>
+ <syscall name="stime" number="233"/>
+ <syscall name="statfs64" number="234"/>
+ <syscall name="fstatfs64" number="235"/>
+ <syscall name="_llseek" number="236"/>
+ <syscall name="mlock" number="237"/>
+ <syscall name="munlock" number="238"/>
+ <syscall name="mlockall" number="239"/>
+ <syscall name="munlockall" number="240"/>
+ <syscall name="sched_setparam" number="241"/>
+ <syscall name="sched_getparam" number="242"/>
+ <syscall name="sched_setscheduler" number="243"/>
+ <syscall name="sched_getscheduler" number="244"/>
+ <syscall name="sched_yield" number="245"/>
+ <syscall name="sched_get_priority_max" number="246"/>
+ <syscall name="sched_get_priority_min" number="247"/>
+ <syscall name="sched_rr_get_interval" number="248"/>
+ <syscall name="nanosleep" number="249"/>
+ <syscall name="mremap" number="250"/>
+ <syscall name="_sysctl" number="251"/>
+ <syscall name="getsid" number="252"/>
+ <syscall name="fdatasync" number="253"/>
+ <syscall name="nfsservctl" number="254"/>
+ <syscall name="sync_file_range" number="255"/>
+ <syscall name="clock_settime" number="256"/>
+ <syscall name="clock_gettime" number="257"/>
+ <syscall name="clock_getres" number="258"/>
+ <syscall name="clock_nanosleep" number="259"/>
+ <syscall name="sched_getaffinity" number="260"/>
+ <syscall name="sched_setaffinity" number="261"/>
+ <syscall name="timer_settime" number="262"/>
+ <syscall name="timer_gettime" number="263"/>
+ <syscall name="timer_getoverrun" number="264"/>
+ <syscall name="timer_delete" number="265"/>
+ <syscall name="timer_create" number="266"/>
+ <syscall name="vserver" number="267"/>
+ <syscall name="io_setup" number="268"/>
+ <syscall name="io_destroy" number="269"/>
+ <syscall name="io_submit" number="270"/>
+ <syscall name="io_cancel" number="271"/>
+ <syscall name="io_getevents" number="272"/>
+ <syscall name="mq_open" number="273"/>
+ <syscall name="mq_unlink" number="274"/>
+ <syscall name="mq_timedsend" number="275"/>
+ <syscall name="mq_timedreceive" number="276"/>
+ <syscall name="mq_notify" number="277"/>
+ <syscall name="mq_getsetattr" number="278"/>
+ <syscall name="waitid" number="279"/>
+ <syscall name="tee" number="280"/>
+ <syscall name="add_key" number="281"/>
+ <syscall name="request_key" number="282"/>
+ <syscall name="keyctl" number="283"/>
+ <syscall name="openat" number="284"/>
+ <syscall name="mkdirat" number="285"/>
+ <syscall name="mknodat" number="286"/>
+ <syscall name="fchownat" number="287"/>
+ <syscall name="futimesat" number="288"/>
+ <syscall name="fstatat64" number="289"/>
+ <syscall name="unlinkat" number="290"/>
+ <syscall name="renameat" number="291"/>
+ <syscall name="linkat" number="292"/>
+ <syscall name="symlinkat" number="293"/>
+ <syscall name="readlinkat" number="294"/>
+ <syscall name="fchmodat" number="295"/>
+ <syscall name="faccessat" number="296"/>
+ <syscall name="pselect6" number="297"/>
+ <syscall name="ppoll" number="298"/>
+ <syscall name="unshare" number="299"/>
+ <syscall name="set_robust_list" number="300"/>
+ <syscall name="get_robust_list" number="301"/>
+ <syscall name="migrate_pages" number="302"/>
+ <syscall name="mbind" number="303"/>
+ <syscall name="get_mempolicy" number="304"/>
+ <syscall name="set_mempolicy" number="305"/>
+ <syscall name="kexec_load" number="306"/>
+ <syscall name="move_pages" number="307"/>
+ <syscall name="getcpu" number="308"/>
+ <syscall name="epoll_pwait" number="309"/>
+ <syscall name="utimensat" number="310"/>
+ <syscall name="signalfd" number="311"/>
+ <syscall name="timerfd_create" number="312"/>
+ <syscall name="eventfd" number="313"/>
+ <syscall name="fallocate" number="314"/>
+ <syscall name="timerfd_settime" number="315"/>
+ <syscall name="timerfd_gettime" number="316"/>
+ <syscall name="signalfd4" number="317"/>
+ <syscall name="eventfd2" number="318"/>
+ <syscall name="epoll_create1" number="319"/>
+ <syscall name="dup3" number="320"/>
+ <syscall name="pipe2" number="321"/>
+ <syscall name="inotify_init1" number="322"/>
+ <syscall name="accept4" number="323"/>
+ <syscall name="preadv" number="324"/>
+ <syscall name="pwritev" number="325"/>
+ <syscall name="rt_tgsigqueueinfo" number="326"/>
+ <syscall name="perf_event_open" number="327"/>
+ <syscall name="recvmmsg" number="328"/>
+</syscalls_info>
diff --git a/share/gdb/syscalls/sparc64-linux.xml b/share/gdb/syscalls/sparc64-linux.xml
new file mode 100644
index 0000000..4403ca3
--- /dev/null
+++ b/share/gdb/syscalls/sparc64-linux.xml
@@ -0,0 +1,326 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2010-2013 Free Software Foundation, Inc.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
+
+<!-- This file was generated using the following file:
+
+ /usr/src/linux/arch/sparc/include/asm/unistd.h
+
+ The file mentioned above belongs to the Linux Kernel. -->
+
+<syscalls_info>
+ <syscall name="restart_syscall" number="0"/>
+ <syscall name="exit" number="1"/>
+ <syscall name="fork" number="2"/>
+ <syscall name="read" number="3"/>
+ <syscall name="write" number="4"/>
+ <syscall name="open" number="5"/>
+ <syscall name="close" number="6"/>
+ <syscall name="wait4" number="7"/>
+ <syscall name="creat" number="8"/>
+ <syscall name="link" number="9"/>
+ <syscall name="unlink" number="10"/>
+ <syscall name="execv" number="11"/>
+ <syscall name="chdir" number="12"/>
+ <syscall name="chown" number="13"/>
+ <syscall name="mknod" number="14"/>
+ <syscall name="chmod" number="15"/>
+ <syscall name="lchown" number="16"/>
+ <syscall name="brk" number="17"/>
+ <syscall name="perfctr" number="18"/>
+ <syscall name="lseek" number="19"/>
+ <syscall name="getpid" number="20"/>
+ <syscall name="capget" number="21"/>
+ <syscall name="capset" number="22"/>
+ <syscall name="setuid" number="23"/>
+ <syscall name="getuid" number="24"/>
+ <syscall name="vmsplice" number="25"/>
+ <syscall name="ptrace" number="26"/>
+ <syscall name="alarm" number="27"/>
+ <syscall name="sigaltstack" number="28"/>
+ <syscall name="pause" number="29"/>
+ <syscall name="utime" number="30"/>
+ <syscall name="access" number="33"/>
+ <syscall name="nice" number="34"/>
+ <syscall name="sync" number="36"/>
+ <syscall name="kill" number="37"/>
+ <syscall name="stat" number="38"/>
+ <syscall name="sendfile" number="39"/>
+ <syscall name="lstat" number="40"/>
+ <syscall name="dup" number="41"/>
+ <syscall name="pipe" number="42"/>
+ <syscall name="times" number="43"/>
+ <syscall name="umount2" number="45"/>
+ <syscall name="setgid" number="46"/>
+ <syscall name="getgid" number="47"/>
+ <syscall name="signal" number="48"/>
+ <syscall name="geteuid" number="49"/>
+ <syscall name="getegid" number="50"/>
+ <syscall name="acct" number="51"/>
+ <syscall name="memory_ordering" number="52"/>
+ <syscall name="ioctl" number="54"/>
+ <syscall name="reboot" number="55"/>
+ <syscall name="symlink" number="57"/>
+ <syscall name="readlink" number="58"/>
+ <syscall name="execve" number="59"/>
+ <syscall name="umask" number="60"/>
+ <syscall name="chroot" number="61"/>
+ <syscall name="fstat" number="62"/>
+ <syscall name="fstat64" number="63"/>
+ <syscall name="getpagesize" number="64"/>
+ <syscall name="msync" number="65"/>
+ <syscall name="vfork" number="66"/>
+ <syscall name="pread64" number="67"/>
+ <syscall name="pwrite64" number="68"/>
+ <syscall name="mmap" number="71"/>
+ <syscall name="munmap" number="73"/>
+ <syscall name="mprotect" number="74"/>
+ <syscall name="madvise" number="75"/>
+ <syscall name="vhangup" number="76"/>
+ <syscall name="mincore" number="78"/>
+ <syscall name="getgroups" number="79"/>
+ <syscall name="setgroups" number="80"/>
+ <syscall name="getpgrp" number="81"/>
+ <syscall name="setitimer" number="83"/>
+ <syscall name="swapon" number="85"/>
+ <syscall name="getitimer" number="86"/>
+ <syscall name="sethostname" number="88"/>
+ <syscall name="dup2" number="90"/>
+ <syscall name="fcntl" number="92"/>
+ <syscall name="select" number="93"/>
+ <syscall name="fsync" number="95"/>
+ <syscall name="setpriority" number="96"/>
+ <syscall name="socket" number="97"/>
+ <syscall name="connect" number="98"/>
+ <syscall name="accept" number="99"/>
+ <syscall name="getpriority" number="100"/>
+ <syscall name="rt_sigreturn" number="101"/>
+ <syscall name="rt_sigaction" number="102"/>
+ <syscall name="rt_sigprocmask" number="103"/>
+ <syscall name="rt_sigpending" number="104"/>
+ <syscall name="rt_sigtimedwait" number="105"/>
+ <syscall name="rt_sigqueueinfo" number="106"/>
+ <syscall name="rt_sigsuspend" number="107"/>
+ <syscall name="setresuid" number="108"/>
+ <syscall name="getresuid" number="109"/>
+ <syscall name="setresgid" number="110"/>
+ <syscall name="getresgid" number="111"/>
+ <syscall name="recvmsg" number="113"/>
+ <syscall name="sendmsg" number="114"/>
+ <syscall name="gettimeofday" number="116"/>
+ <syscall name="getrusage" number="117"/>
+ <syscall name="getsockopt" number="118"/>
+ <syscall name="getcwd" number="119"/>
+ <syscall name="readv" number="120"/>
+ <syscall name="writev" number="121"/>
+ <syscall name="settimeofday" number="122"/>
+ <syscall name="fchown" number="123"/>
+ <syscall name="fchmod" number="124"/>
+ <syscall name="recvfrom" number="125"/>
+ <syscall name="setreuid" number="126"/>
+ <syscall name="setregid" number="127"/>
+ <syscall name="rename" number="128"/>
+ <syscall name="truncate" number="129"/>
+ <syscall name="ftruncate" number="130"/>
+ <syscall name="flock" number="131"/>
+ <syscall name="lstat64" number="132"/>
+ <syscall name="sendto" number="133"/>
+ <syscall name="shutdown" number="134"/>
+ <syscall name="socketpair" number="135"/>
+ <syscall name="mkdir" number="136"/>
+ <syscall name="rmdir" number="137"/>
+ <syscall name="utimes" number="138"/>
+ <syscall name="stat64" number="139"/>
+ <syscall name="sendfile64" number="140"/>
+ <syscall name="getpeername" number="141"/>
+ <syscall name="futex" number="142"/>
+ <syscall name="gettid" number="143"/>
+ <syscall name="getrlimit" number="144"/>
+ <syscall name="setrlimit" number="145"/>
+ <syscall name="pivot_root" number="146"/>
+ <syscall name="prctl" number="147"/>
+ <syscall name="pciconfig_read" number="148"/>
+ <syscall name="pciconfig_write" number="149"/>
+ <syscall name="getsockname" number="150"/>
+ <syscall name="inotify_init" number="151"/>
+ <syscall name="inotify_add_watch" number="152"/>
+ <syscall name="poll" number="153"/>
+ <syscall name="getdents64" number="154"/>
+ <syscall name="inotify_rm_watch" number="156"/>
+ <syscall name="statfs" number="157"/>
+ <syscall name="fstatfs" number="158"/>
+ <syscall name="umount" number="159"/>
+ <syscall name="sched_set_affinity" number="160"/>
+ <syscall name="sched_get_affinity" number="161"/>
+ <syscall name="getdomainname" number="162"/>
+ <syscall name="setdomainname" number="163"/>
+ <syscall name="utrap_install" number="164"/>
+ <syscall name="quotactl" number="165"/>
+ <syscall name="set_tid_address" number="166"/>
+ <syscall name="mount" number="167"/>
+ <syscall name="ustat" number="168"/>
+ <syscall name="setxattr" number="169"/>
+ <syscall name="lsetxattr" number="170"/>
+ <syscall name="fsetxattr" number="171"/>
+ <syscall name="getxattr" number="172"/>
+ <syscall name="lgetxattr" number="173"/>
+ <syscall name="getdents" number="174"/>
+ <syscall name="setsid" number="175"/>
+ <syscall name="fchdir" number="176"/>
+ <syscall name="fgetxattr" number="177"/>
+ <syscall name="listxattr" number="178"/>
+ <syscall name="llistxattr" number="179"/>
+ <syscall name="flistxattr" number="180"/>
+ <syscall name="removexattr" number="181"/>
+ <syscall name="lremovexattr" number="182"/>
+ <syscall name="sigpending" number="183"/>
+ <syscall name="query_module" number="184"/>
+ <syscall name="setpgid" number="185"/>
+ <syscall name="fremovexattr" number="186"/>
+ <syscall name="tkill" number="187"/>
+ <syscall name="exit_group" number="188"/>
+ <syscall name="uname" number="189"/>
+ <syscall name="init_module" number="190"/>
+ <syscall name="personality" number="191"/>
+ <syscall name="remap_file_pages" number="192"/>
+ <syscall name="epoll_create" number="193"/>
+ <syscall name="epoll_ctl" number="194"/>
+ <syscall name="epoll_wait" number="195"/>
+ <syscall name="ioprio_set" number="196"/>
+ <syscall name="getppid" number="197"/>
+ <syscall name="sigaction" number="198"/>
+ <syscall name="sgetmask" number="199"/>
+ <syscall name="ssetmask" number="200"/>
+ <syscall name="sigsuspend" number="201"/>
+ <syscall name="oldlstat" number="202"/>
+ <syscall name="uselib" number="203"/>
+ <syscall name="readdir" number="204"/>
+ <syscall name="readahead" number="205"/>
+ <syscall name="socketcall" number="206"/>
+ <syscall name="syslog" number="207"/>
+ <syscall name="lookup_dcookie" number="208"/>
+ <syscall name="fadvise64" number="209"/>
+ <syscall name="fadvise64_64" number="210"/>
+ <syscall name="tgkill" number="211"/>
+ <syscall name="waitpid" number="212"/>
+ <syscall name="swapoff" number="213"/>
+ <syscall name="sysinfo" number="214"/>
+ <syscall name="ipc" number="215"/>
+ <syscall name="sigreturn" number="216"/>
+ <syscall name="clone" number="217"/>
+ <syscall name="ioprio_get" number="218"/>
+ <syscall name="adjtimex" number="219"/>
+ <syscall name="sigprocmask" number="220"/>
+ <syscall name="create_module" number="221"/>
+ <syscall name="delete_module" number="222"/>
+ <syscall name="get_kernel_syms" number="223"/>
+ <syscall name="getpgid" number="224"/>
+ <syscall name="bdflush" number="225"/>
+ <syscall name="sysfs" number="226"/>
+ <syscall name="afs_syscall" number="227"/>
+ <syscall name="setfsuid" number="228"/>
+ <syscall name="setfsgid" number="229"/>
+ <syscall name="_newselect" number="230"/>
+ <syscall name="splice" number="232"/>
+ <syscall name="stime" number="233"/>
+ <syscall name="statfs64" number="234"/>
+ <syscall name="fstatfs64" number="235"/>
+ <syscall name="_llseek" number="236"/>
+ <syscall name="mlock" number="237"/>
+ <syscall name="munlock" number="238"/>
+ <syscall name="mlockall" number="239"/>
+ <syscall name="munlockall" number="240"/>
+ <syscall name="sched_setparam" number="241"/>
+ <syscall name="sched_getparam" number="242"/>
+ <syscall name="sched_setscheduler" number="243"/>
+ <syscall name="sched_getscheduler" number="244"/>
+ <syscall name="sched_yield" number="245"/>
+ <syscall name="sched_get_priority_max" number="246"/>
+ <syscall name="sched_get_priority_min" number="247"/>
+ <syscall name="sched_rr_get_interval" number="248"/>
+ <syscall name="nanosleep" number="249"/>
+ <syscall name="mremap" number="250"/>
+ <syscall name="_sysctl" number="251"/>
+ <syscall name="getsid" number="252"/>
+ <syscall name="fdatasync" number="253"/>
+ <syscall name="nfsservctl" number="254"/>
+ <syscall name="sync_file_range" number="255"/>
+ <syscall name="clock_settime" number="256"/>
+ <syscall name="clock_gettime" number="257"/>
+ <syscall name="clock_getres" number="258"/>
+ <syscall name="clock_nanosleep" number="259"/>
+ <syscall name="sched_getaffinity" number="260"/>
+ <syscall name="sched_setaffinity" number="261"/>
+ <syscall name="timer_settime" number="262"/>
+ <syscall name="timer_gettime" number="263"/>
+ <syscall name="timer_getoverrun" number="264"/>
+ <syscall name="timer_delete" number="265"/>
+ <syscall name="timer_create" number="266"/>
+ <syscall name="vserver" number="267"/>
+ <syscall name="io_setup" number="268"/>
+ <syscall name="io_destroy" number="269"/>
+ <syscall name="io_submit" number="270"/>
+ <syscall name="io_cancel" number="271"/>
+ <syscall name="io_getevents" number="272"/>
+ <syscall name="mq_open" number="273"/>
+ <syscall name="mq_unlink" number="274"/>
+ <syscall name="mq_timedsend" number="275"/>
+ <syscall name="mq_timedreceive" number="276"/>
+ <syscall name="mq_notify" number="277"/>
+ <syscall name="mq_getsetattr" number="278"/>
+ <syscall name="waitid" number="279"/>
+ <syscall name="tee" number="280"/>
+ <syscall name="add_key" number="281"/>
+ <syscall name="request_key" number="282"/>
+ <syscall name="keyctl" number="283"/>
+ <syscall name="openat" number="284"/>
+ <syscall name="mkdirat" number="285"/>
+ <syscall name="mknodat" number="286"/>
+ <syscall name="fchownat" number="287"/>
+ <syscall name="futimesat" number="288"/>
+ <syscall name="fstatat64" number="289"/>
+ <syscall name="unlinkat" number="290"/>
+ <syscall name="renameat" number="291"/>
+ <syscall name="linkat" number="292"/>
+ <syscall name="symlinkat" number="293"/>
+ <syscall name="readlinkat" number="294"/>
+ <syscall name="fchmodat" number="295"/>
+ <syscall name="faccessat" number="296"/>
+ <syscall name="pselect6" number="297"/>
+ <syscall name="ppoll" number="298"/>
+ <syscall name="unshare" number="299"/>
+ <syscall name="set_robust_list" number="300"/>
+ <syscall name="get_robust_list" number="301"/>
+ <syscall name="migrate_pages" number="302"/>
+ <syscall name="mbind" number="303"/>
+ <syscall name="get_mempolicy" number="304"/>
+ <syscall name="set_mempolicy" number="305"/>
+ <syscall name="kexec_load" number="306"/>
+ <syscall name="move_pages" number="307"/>
+ <syscall name="getcpu" number="308"/>
+ <syscall name="epoll_pwait" number="309"/>
+ <syscall name="utimensat" number="310"/>
+ <syscall name="signalfd" number="311"/>
+ <syscall name="timerfd_create" number="312"/>
+ <syscall name="eventfd" number="313"/>
+ <syscall name="fallocate" number="314"/>
+ <syscall name="timerfd_settime" number="315"/>
+ <syscall name="timerfd_gettime" number="316"/>
+ <syscall name="signalfd4" number="317"/>
+ <syscall name="eventfd2" number="318"/>
+ <syscall name="epoll_create1" number="319"/>
+ <syscall name="dup3" number="320"/>
+ <syscall name="pipe2" number="321"/>
+ <syscall name="inotify_init1" number="322"/>
+ <syscall name="accept4" number="323"/>
+ <syscall name="preadv" number="324"/>
+ <syscall name="pwritev" number="325"/>
+ <syscall name="rt_tgsigqueueinfo" number="326"/>
+ <syscall name="perf_event_open" number="327"/>
+ <syscall name="recvmmsg" number="328"/>
+</syscalls_info>
diff --git a/toolchain.mk b/toolchain.mk
new file mode 100644
index 0000000..2ecc24c
--- /dev/null
+++ b/toolchain.mk
@@ -0,0 +1,17 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(call all-makefiles-under, $(LOCAL_PATH))