summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBernhard Rosenkränzer <Bernhard.Rosenkranzer@linaro.org>2017-01-17 15:03:46 +0100
committerBernhard Rosenkränzer <Bernhard.Rosenkranzer@linaro.org>2017-01-17 15:03:46 +0100
commit6f35566cc701e156d73ed27b473875d9a38964fb (patch)
tree733bedaeda1afcf0dd65f9fb73fccff46d774df9
parentdcbd99ab398ba468966069ec0112715a5ddfc556 (diff)
downloadaarch64-linux-android-6.3-linaro-6f35566cc701e156d73ed27b473875d9a38964fb.tar.gz
Update to gcc 6.3-2017.01, downgrade to binutils 2.25
https://ci.linaro.org/job/android-gcc-toolchain/ build #21 binutils downgraded to work around https://llvm.org/bugs/show_bug.cgi?id=29017 Change-Id: Ic67680558a82d2328c088a869cc906853fca907a Signed-off-by: Bernhard Rosenkränzer <Bernhard.Rosenkranzer@linaro.org>
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.x12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xs12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xsc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xsw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf.xw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.x12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xs12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xsc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xsw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32.xw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.x12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xs12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elf32b.xw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.x12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xs12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xsc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xsw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64elfb.xw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.x14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xd14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xdc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xdw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xn14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xs14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xsc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xsw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux.xw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.x14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xd14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xdc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xdw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xn14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xs14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xsc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xsw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32.xw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.x14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xd14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xn14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xs14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linux32b.xw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.x14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xd14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xn14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xs14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/aarch64linuxb.xw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.x12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xs12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xsc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xsw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf.xw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.x14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xd14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xn14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xs14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.x12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xs12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xsc12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xsw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb.xw12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.x14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xbn12
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xd14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xn14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xr10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xs14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsc14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsw14
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xu10
-rw-r--r--aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xw14
-rw-r--r--aarch64-linux-android/lib64/libatomic.abin330572 -> 333452 bytes
-rw-r--r--aarch64-linux-android/lib64/libgfortran.abin13397572 -> 13418268 bytes
-rw-r--r--aarch64-linux-android/lib64/libgomp.abin1654120 -> 1655488 bytes
-rw-r--r--aarch64-linux-android/lib64/libobjc.abin549798 -> 551254 bytes
-rwxr-xr-xbin/aarch64-linux-android-addr2linebin964504 -> 855736 bytes
-rwxr-xr-xbin/aarch64-linux-android-arbin993088 -> 884320 bytes
-rwxr-xr-xbin/aarch64-linux-android-asbin1486248 -> 1330176 bytes
-rwxr-xr-xbin/aarch64-linux-android-c++filtbin961624 -> 851736 bytes
-rwxr-xr-xbin/aarch64-linux-android-cppbin898376 -> 897096 bytes
-rwxr-xr-xbin/aarch64-linux-android-dwpbin2838664 -> 2745256 bytes
-rwxr-xr-xbin/aarch64-linux-android-elfeditbin28584 -> 27976 bytes
-rwxr-xr-xbin/aarch64-linux-android-gcc-6.3.1 (renamed from bin/aarch64-linux-android-gcc-6.3.0)bin894280 -> 895496 bytes
-rwxr-xr-xbin/aarch64-linux-android-gcc-arbin25992 -> 25992 bytes
-rwxr-xr-xbin/aarch64-linux-android-gcc-nmbin25960 -> 25960 bytes
-rwxr-xr-xbin/aarch64-linux-android-gcc-ranlibbin25960 -> 25960 bytes
-rwxr-xr-xbin/aarch64-linux-android-gcovbin480776 -> 480840 bytes
-rwxr-xr-xbin/aarch64-linux-android-gcov-toolbin442472 -> 442472 bytes
-rwxr-xr-xbin/aarch64-linux-android-gfortranbin898376 -> 898152 bytes
-rwxr-xr-xbin/aarch64-linux-android-gprofbin1028664 -> 918904 bytes
-rwxr-xr-xbin/aarch64-linux-android-ld.bfdbin2046920 -> 1916704 bytes
-rwxr-xr-xbin/aarch64-linux-android-ld.goldbin4950264 -> 4491560 bytes
-rwxr-xr-xbin/aarch64-linux-android-nmbin977464 -> 864920 bytes
-rwxr-xr-xbin/aarch64-linux-android-objcopybin1156600 -> 1040120 bytes
-rwxr-xr-xbin/aarch64-linux-android-objdumpbin1688024 -> 1487992 bytes
-rwxr-xr-xbin/aarch64-linux-android-ranlibbin993088 -> 884320 bytes
-rwxr-xr-xbin/aarch64-linux-android-readelfbin498056 -> 436168 bytes
-rwxr-xr-xbin/aarch64-linux-android-sizebin964568 -> 855608 bytes
-rwxr-xr-xbin/aarch64-linux-android-stringsbin964504 -> 855704 bytes
-rwxr-xr-xbin/aarch64-linux-android-stripbin1156600 -> 1040152 bytes
-rwxr-xr-xbin/real-aarch64-linux-android-g++bin898376 -> 898792 bytes
-rwxr-xr-xbin/real-aarch64-linux-android-gccbin894280 -> 895496 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/crtbegin.obin3608 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/crtbeginS.obin3984 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/crtbeginT.obin3608 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/crtend.obin1392 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/crtendS.obin1392 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/crtfastmath.obin3448 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/libcaf_single.abin143942 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/libgcc.abin1195006 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.0/libgcov.abin358154 -> 0 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/crtbegin.obin0 -> 3664 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/crtbeginS.obin0 -> 4040 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/crtbeginT.obin0 -> 3664 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/crtend.obin0 -> 1424 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/crtendS.obin0 -> 1424 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/crtfastmath.obin0 -> 3496 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_arithmetic.mod (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_arithmetic.mod)bin6566 -> 6566 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_exceptions.mod (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_exceptions.mod)bin1442 -> 1442 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_features.mod (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_features.mod)bin667 -> 667 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.f90 (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.f90)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.mod (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.mod)bin2981 -> 2981 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib_kinds.mod (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib_kinds.mod)bin608 -> 608 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc.f90 (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc.f90)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc.mod (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc.mod)bin3523 -> 3523 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc_kinds.mod (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc_kinds.mod)bin506 -> 506 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc_lib.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc_lib.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include-fixed/README (renamed from lib/gcc/aarch64-linux-android/6.3.0/include-fixed/README)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include-fixed/limits.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include-fixed/limits.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include-fixed/linux/a.out.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include-fixed/linux/a.out.h)2
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include-fixed/syslimits.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include-fixed/syslimits.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/arm_acle.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/arm_acle.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/arm_fp16.h579
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/arm_neon.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/arm_neon.h)13597
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/float.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/float.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/iso646.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/iso646.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/NXConstStr.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/NXConstStr.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/Object.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/Object.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/Protocol.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/Protocol.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/message.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/message.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-decls.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-decls.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-exception.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-exception.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-sync.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-sync.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/runtime.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/runtime.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/objc/thr.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/objc/thr.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/omp.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/omp.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/openacc.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/openacc.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdalign.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdalign.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdarg.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdarg.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdatomic.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdatomic.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdbool.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdbool.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stddef.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stddef.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdfix.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdfix.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdint-gcc.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdint-gcc.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdint.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdint.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/stdnoreturn.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/stdnoreturn.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/unwind.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/unwind.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/include/varargs.h (renamed from lib/gcc/aarch64-linux-android/6.3.0/include/varargs.h)0
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/libcaf_single.abin0 -> 143974 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/libgcc.abin0 -> 1228062 bytes
-rw-r--r--lib/gcc/aarch64-linux-android/6.3.1/libgcov.abin0 -> 363674 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.0/plugin/gengtypebin184640 -> 0 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/cc1 (renamed from libexec/gcc/aarch64-linux-android/6.3.0/cc1obj)bin17216328 -> 17239304 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/cc1obj (renamed from libexec/gcc/aarch64-linux-android/6.3.0/cc1)bin17010344 -> 17446696 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/cc1objplus (renamed from libexec/gcc/aarch64-linux-android/6.3.0/cc1plus)bin18341624 -> 18782040 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/cc1plus (renamed from libexec/gcc/aarch64-linux-android/6.3.0/cc1objplus)bin18551672 -> 18570552 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/collect2 (renamed from libexec/gcc/aarch64-linux-android/6.3.0/collect2)bin500008 -> 500008 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/f951 (renamed from libexec/gcc/aarch64-linux-android/6.3.0/f951)bin17838904 -> 18069816 bytes
l---------libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so (renamed from libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so)0
l---------libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so.0 (renamed from libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so.0)0
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so.0.0.0 (renamed from libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so.0.0.0)bin91736 -> 91736 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/lto-wrapper (renamed from libexec/gcc/aarch64-linux-android/6.3.0/lto-wrapper)bin801528 -> 801528 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/lto1 (renamed from libexec/gcc/aarch64-linux-android/6.3.0/lto1)bin16162632 -> 16393160 bytes
-rwxr-xr-xlibexec/gcc/aarch64-linux-android/6.3.1/plugin/gengtypebin0 -> 184640 bytes
-rw-r--r--repo.prop24
244 files changed, 11006 insertions, 4924 deletions
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.x b/aarch64-linux-android/lib/ldscripts/aarch64elf.x
index e6d58b6..8c4f6e6 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xbn b/aarch64-linux-android/lib/ldscripts/aarch64elf.xbn
index 01e8157..8d99718 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xc b/aarch64-linux-android/lib/ldscripts/aarch64elf.xc
index 707a382..a3cbf8a 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xn b/aarch64-linux-android/lib/ldscripts/aarch64elf.xn
index 2a9833d..8a24e12 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xr b/aarch64-linux-android/lib/ldscripts/aarch64elf.xr
index 7d7578f..718fff1 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xs b/aarch64-linux-android/lib/ldscripts/aarch64elf.xs
index a54c8c2..d181f94 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,11 +62,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -74,8 +73,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xsc b/aarch64-linux-android/lib/ldscripts/aarch64elf.xsc
index 26daa08..21efc26 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xsw b/aarch64-linux-android/lib/ldscripts/aarch64elf.xsw
index 4467287..e45fc50 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xu b/aarch64-linux-android/lib/ldscripts/aarch64elf.xu
index 834b464..6376303 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf.xw b/aarch64-linux-android/lib/ldscripts/aarch64elf.xw
index 1ed7399..6887bd0 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.x b/aarch64-linux-android/lib/ldscripts/aarch64elf32.x
index 178897b..f97a823 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xbn b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xbn
index d0d3f51..11117de 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xc b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xc
index 38c1383..2cd7192 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xn b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xn
index d8d5f14..8a65278 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xr b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xr
index c02bf81..e39cca7 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xs b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xs
index 181017c..65b6003 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,11 +62,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -74,8 +73,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsc b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsc
index 4af765f..c003ab6 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsw b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsw
index 89cff06..b3947b4 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xu b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xu
index e01da52..bb5ae41 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xw b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xw
index c478cbe..67f5a29 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.x b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.x
index ac83f20..bca3596 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xbn b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xbn
index e99ecff..f62f938 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xc b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xc
index c005347..ac9aa29 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xn b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xn
index 5dc1ce2..5dc9d46 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xr b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xr
index e5822cd..e7d4838 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xs b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xs
index d203768..335766e 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,11 +62,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -74,8 +73,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsc b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsc
index 782a7f9..239dd44 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsw b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsw
index 271aa3b..37e1313 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xu b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xu
index 72ed3cc..0e80618 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xw b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xw
index a55a425..c1d664c 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elf32b.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.x b/aarch64-linux-android/lib/ldscripts/aarch64elfb.x
index a5daf9a..1dcbdb7 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xbn b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xbn
index 61ae792..c9a41cf 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xc b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xc
index 8580d6a..e4d6e58 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xn b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xn
index 94ff92f..c25901e 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xr b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xr
index 04e8df0..94bb33d 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xs b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xs
index c790703..c0bf94a 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,11 +62,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -74,8 +73,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsc b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsc
index 41b92e4..8b2579f 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsw b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsw
index b5949c6..5be152d 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,11 +64,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -76,8 +75,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xu b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xu
index 4ae683d..58e175c 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xw b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xw
index 7fda46d..902676f 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64elfb.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64elfb.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,11 +67,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -79,8 +78,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.x b/aarch64-linux-android/lib/ldscripts/aarch64linux.x
index b84707d..e2ced86 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xbn b/aarch64-linux-android/lib/ldscripts/aarch64linux.xbn
index 75b5981..cd54976 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xc b/aarch64-linux-android/lib/ldscripts/aarch64linux.xc
index 35503f9..bf051ac 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xd b/aarch64-linux-android/lib/ldscripts/aarch64linux.xd
index c68cf79..8043dbf 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xd
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xd
@@ -1,5 +1,5 @@
/* Script for ld -pie: link position independent executable */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xdc b/aarch64-linux-android/lib/ldscripts/aarch64linux.xdc
index 5a4eb6a..d8a39a1 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xdc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xdc
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xdw b/aarch64-linux-android/lib/ldscripts/aarch64linux.xdw
index c24952b..99b60c0 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xdw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xdw
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xn b/aarch64-linux-android/lib/ldscripts/aarch64linux.xn
index 240b93c..b923ac3 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xr b/aarch64-linux-android/lib/ldscripts/aarch64linux.xr
index 2ac4800..4404f64 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xs b/aarch64-linux-android/lib/ldscripts/aarch64linux.xs
index cbfe382..2f8bf48 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,20 +62,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xsc b/aarch64-linux-android/lib/ldscripts/aarch64linux.xsc
index ab8b86d..a133295 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xsw b/aarch64-linux-android/lib/ldscripts/aarch64linux.xsw
index 921b163..0b874b6 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xu b/aarch64-linux-android/lib/ldscripts/aarch64linux.xu
index 3721894..9a38aea 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux.xw b/aarch64-linux-android/lib/ldscripts/aarch64linux.xw
index e16727d..23344dc 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.x b/aarch64-linux-android/lib/ldscripts/aarch64linux32.x
index f2b8572..ca42e5b 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xbn b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xbn
index 6a9aa55..83e82ae 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xc b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xc
index 1cba5d8..025c19f 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xd b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xd
index 7bb5090..9eefa7c 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xd
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xd
@@ -1,5 +1,5 @@
/* Script for ld -pie: link position independent executable */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdc b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdc
index 5cb2f74..2580406 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdc
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdw b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdw
index 25b2fb7..dbc683c 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xdw
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xn b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xn
index 38d2faf..183e274 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xr b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xr
index 67e8093..9c94659 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xs b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xs
index ef070f1..79657f0 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,20 +62,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsc b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsc
index 25d4ca9..6a5efd3 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsw b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsw
index e65280b..42be4fa 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xu b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xu
index d34ccc4..26d2dd8 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xw b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xw
index f8a530f..f8833bb 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littleaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.x b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.x
index 4a234eb..5003bf0 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xbn b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xbn
index c97e152..c93aca2 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xc b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xc
index 348ef68..f66162d 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xd b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xd
index 8a47cdd..32b178b 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xd
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xd
@@ -1,5 +1,5 @@
/* Script for ld -pie: link position independent executable */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdc b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdc
index ed04523..107fc79 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdc
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdw b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdw
index d04fdc3..8a20375 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xdw
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xn b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xn
index f8f38ce..5c8a109 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xr b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xr
index a000fb7..ee330a6 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xs b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xs
index 482524a..887adfb 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,20 +62,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsc b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsc
index b36422d..b816b1a 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsw b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsw
index 3f693e7..fe8a508 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xu b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xu
index d112555..b0da46f 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xw b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xw
index fbee43b..595c183 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linux32b.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigaarch64", "elf32-bigaarch64",
"elf32-littleaarch64")
OUTPUT_ARCH(aarch64:ilp32)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/libilp32"); SEARCH_DIR("=/usr/local/libilp32"); SEARCH_DIR("=/libilp32"); SEARCH_DIR("=/usr/libilp32"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.x b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.x
index 7c64d0c..da81409 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.x
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xbn b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xbn
index 124b696..f7c900e 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xbn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,11 +65,10 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -77,8 +76,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xc b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xc
index ba9a7c0..386ae60 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xd b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xd
index 193536e..ef2946f 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xd
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xd
@@ -1,5 +1,5 @@
/* Script for ld -pie: link position independent executable */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdc b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdc
index 0fc8c8a..4e45af0 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdc
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdw b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdw
index 44e2998..62a9b17 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xdw
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xn b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xn
index 9d414c8..967b5c4 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xn
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -65,20 +65,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xr b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xr
index b6f90bf..30a4ecf 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xr
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xs b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xs
index 0fecb5b..deacba2 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xs
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -62,20 +62,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsc b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsc
index 74939ab..9ebf9c5 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsc
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsw b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsw
index f73e011..1129b2d 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -64,20 +64,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xu b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xu
index f0ec24d..24e2676 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xu
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -56,19 +56,17 @@ SECTIONS
} =0
.rodata 0 : { *(.rodata) }
.rodata1 0 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xw b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xw
index 9d7fd73..cb65f73 100644
--- a/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xw
+++ b/aarch64-linux-android/lib/ldscripts/aarch64linuxb.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf64-bigaarch64", "elf64-bigaarch64",
"elf64-littleaarch64")
OUTPUT_ARCH(aarch64)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib64"); SEARCH_DIR("=/usr/local/lib64"); SEARCH_DIR("=/lib64"); SEARCH_DIR("=/usr/lib64"); SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -67,20 +67,18 @@ SECTIONS
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.x b/aarch64-linux-android/lib/ldscripts/armelf.x
index 9fecad4..472b61b 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.x
+++ b/aarch64-linux-android/lib/ldscripts/armelf.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xbn b/aarch64-linux-android/lib/ldscripts/armelf.xbn
index ca0df3d..b4ec3e0 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xbn
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xc b/aarch64-linux-android/lib/ldscripts/armelf.xc
index dcb2caf..5915ad3 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xc
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,11 +94,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -106,8 +105,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xn b/aarch64-linux-android/lib/ldscripts/armelf.xn
index ace77be..4317c06 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xn
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xr b/aarch64-linux-android/lib/ldscripts/armelf.xr
index 6debc63..7442425 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xr
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xs b/aarch64-linux-android/lib/ldscripts/armelf.xs
index 0f5cbce..fa4ac79 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xs
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -90,11 +90,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -102,8 +101,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xsc b/aarch64-linux-android/lib/ldscripts/armelf.xsc
index e826662..9531e7a 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xsc
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,11 +89,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -101,8 +100,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xsw b/aarch64-linux-android/lib/ldscripts/armelf.xsw
index 3dd0f13..6a7d36e 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xsw
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,11 +89,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -101,8 +100,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xu b/aarch64-linux-android/lib/ldscripts/armelf.xu
index ce83d1a..d7499ec 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xu
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf.xw b/aarch64-linux-android/lib/ldscripts/armelf.xw
index cedb717..b6f8112 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf.xw
+++ b/aarch64-linux-android/lib/ldscripts/armelf.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,11 +94,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -106,8 +105,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.x b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.x
index c6b7788..4d0466c 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.x
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,20 +95,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xbn b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xbn
index da63cdf..68c84a5 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xbn
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xc b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xc
index ce7da9c..f606794 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xc
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xd b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xd
index f763877..d9a0970 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xd
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xd
@@ -1,5 +1,5 @@
/* Script for ld -pie: link position independent executable */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,20 +95,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdc b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdc
index 6885259..c34f583 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdc
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdc
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdw b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdw
index 6f9dde2..a630ae5 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdw
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xdw
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xn b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xn
index a5a52bd..0364e82 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xn
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,20 +95,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xr b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xr
index 3a747da..44e9019 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xr
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xs b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xs
index 5821562..edd80f8 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xs
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -90,20 +90,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsc b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsc
index 1de2ef9..13bb4e9 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsc
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,20 +89,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsw b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsw
index 54dfc6f..14cb0a8 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsw
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,20 +89,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xu b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xu
index 1a105c5..e547c02 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xu
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xw b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xw
index 83e63d4..3850547 100644
--- a/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xw
+++ b/aarch64-linux-android/lib/ldscripts/armelf_linux_eabi.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.x b/aarch64-linux-android/lib/ldscripts/armelfb.x
index 19fd652..79c706b 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.x
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xbn b/aarch64-linux-android/lib/ldscripts/armelfb.xbn
index b7a21f5..847124f 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xbn
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xc b/aarch64-linux-android/lib/ldscripts/armelfb.xc
index 23656e9..60f9e7c 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xc
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,11 +94,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -106,8 +105,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xn b/aarch64-linux-android/lib/ldscripts/armelfb.xn
index 668f487..e05c070 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xn
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xr b/aarch64-linux-android/lib/ldscripts/armelfb.xr
index f1ecd76..d7cf7e7 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xr
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xs b/aarch64-linux-android/lib/ldscripts/armelfb.xs
index 8e91cf6..af44c1a 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xs
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -90,11 +90,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -102,8 +101,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xsc b/aarch64-linux-android/lib/ldscripts/armelfb.xsc
index e66aaf7..64fdb2c 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xsc
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,11 +89,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -101,8 +100,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xsw b/aarch64-linux-android/lib/ldscripts/armelfb.xsw
index 667d738..e52b71d 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xsw
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,11 +89,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -101,8 +100,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xu b/aarch64-linux-android/lib/ldscripts/armelfb.xu
index ba383b3..c90ad5e 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xu
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb.xw b/aarch64-linux-android/lib/ldscripts/armelfb.xw
index 1f8650a..ff9b693 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb.xw
+++ b/aarch64-linux-android/lib/ldscripts/armelfb.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,11 +94,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -106,8 +105,7 @@ SECTIONS
the same address within the page on the next page up. */
. = ALIGN(CONSTANT (MAXPAGESIZE)) + (. & (CONSTANT (MAXPAGESIZE) - 1));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.x b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.x
index 5a4fa03..c2771b3 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.x
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.x
@@ -1,5 +1,5 @@
/* Default linker script, for normal executables */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,20 +95,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xbn b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xbn
index 461f9d4..dd35c3a 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xbn
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xbn
@@ -1,5 +1,5 @@
/* Script for -N: mix text and data on same page; don't align data */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,11 +95,10 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
@@ -107,8 +106,7 @@ SECTIONS
the same address within the page on the next page up. */
. = .;
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xc b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xc
index 536ac00..68070fd 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xc
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xc
@@ -1,5 +1,5 @@
/* Script for -z combreloc: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xd b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xd
index bc6fe66..40dbfcd 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xd
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xd
@@ -1,5 +1,5 @@
/* Script for ld -pie: link position independent executable */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,20 +95,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdc b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdc
index 4a397d2..bd238ce 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdc
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdc
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdw b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdw
index 9bf8cb1..26c3352 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdw
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xdw
@@ -1,5 +1,5 @@
/* Script for -pie -z combreloc -z now -z relro: position independent executable, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xn b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xn
index 03c307b..e985469 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xn
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xn
@@ -1,5 +1,5 @@
/* Script for -n: mix text and data on same page */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -95,20 +95,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xr b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xr
index a4a34ca..81f6eef 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xr
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xr
@@ -1,5 +1,5 @@
/* Script for ld -r: link without relocation */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xs b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xs
index adafd83..67d2d6c 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xs
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xs
@@ -1,5 +1,5 @@
/* Script for ld --shared: link shared library */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -90,20 +90,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsc b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsc
index 4645657..a25cb04 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsc
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsc
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,20 +89,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsw b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsw
index 640b7f7..2cc8f75 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsw
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xsw
@@ -1,5 +1,5 @@
/* Script for --shared -z combreloc -z now -z relro: shared library, combine & sort relocs */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -89,20 +89,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xu b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xu
index 62b2708..c7f920e 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xu
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xu
@@ -1,5 +1,5 @@
/* Script for ld -Ur: link w/out relocation, do create constructors */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -82,19 +82,17 @@ SECTIONS
.rodata1 0 : { *(.rodata1) }
.ARM.extab 0 : { *(.ARM.extab) }
.ARM.exidx 0 : { *(.ARM.exidx) }
- .eh_frame_hdr : { *(.eh_frame_hdr) }
- .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame 0 : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab 0 : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges 0 : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
/* Exception handling */
- .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
- .gnu_extab 0 : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame 0 : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table 0 : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges 0 : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xw b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xw
index 98a8d08..1971102 100644
--- a/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xw
+++ b/aarch64-linux-android/lib/ldscripts/armelfb_linux_eabi.xw
@@ -1,5 +1,5 @@
/* Script for -z combreloc -z now -z relro: combine and sort reloc sections */
-/* Copyright (C) 2014-2016 Free Software Foundation, Inc.
+/* Copyright (C) 2014 Free Software Foundation, Inc.
Copying and distribution of this script, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. */
@@ -7,7 +7,7 @@ OUTPUT_FORMAT("elf32-bigarm", "elf32-bigarm",
"elf32-littlearm")
OUTPUT_ARCH(arm)
ENTRY(_start)
-SEARCH_DIR("=/tmp/32882bbc6f4a79c33d52361d8217b885/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SEARCH_DIR("=/tmp/e0c13069eac71edbeaff7853acb9c12f/aarch64-linux-android/lib"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
@@ -94,20 +94,18 @@ SECTIONS
PROVIDE_HIDDEN (__exidx_start = .);
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
PROVIDE_HIDDEN (__exidx_end = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) }
- .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
.gcc_except_table.*) }
- .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) }
/* These sections are generated by the Sun/Oracle C++ compiler. */
.exception_ranges : ONLY_IF_RO { *(.exception_ranges
.exception_ranges*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
- . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
/* Exception handling */
- .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) }
- .gnu_extab : ONLY_IF_RW { *(.gnu_extab) }
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
.exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) }
/* Thread Local Storage sections */
diff --git a/aarch64-linux-android/lib64/libatomic.a b/aarch64-linux-android/lib64/libatomic.a
index afe18f5..2aaff71 100644
--- a/aarch64-linux-android/lib64/libatomic.a
+++ b/aarch64-linux-android/lib64/libatomic.a
Binary files differ
diff --git a/aarch64-linux-android/lib64/libgfortran.a b/aarch64-linux-android/lib64/libgfortran.a
index 0920f21..1b78bd8 100644
--- a/aarch64-linux-android/lib64/libgfortran.a
+++ b/aarch64-linux-android/lib64/libgfortran.a
Binary files differ
diff --git a/aarch64-linux-android/lib64/libgomp.a b/aarch64-linux-android/lib64/libgomp.a
index 34bfe98..3e72bf7 100644
--- a/aarch64-linux-android/lib64/libgomp.a
+++ b/aarch64-linux-android/lib64/libgomp.a
Binary files differ
diff --git a/aarch64-linux-android/lib64/libobjc.a b/aarch64-linux-android/lib64/libobjc.a
index 3c5f536..9bffb83 100644
--- a/aarch64-linux-android/lib64/libobjc.a
+++ b/aarch64-linux-android/lib64/libobjc.a
Binary files differ
diff --git a/bin/aarch64-linux-android-addr2line b/bin/aarch64-linux-android-addr2line
index 5075616..bf47159 100755
--- a/bin/aarch64-linux-android-addr2line
+++ b/bin/aarch64-linux-android-addr2line
Binary files differ
diff --git a/bin/aarch64-linux-android-ar b/bin/aarch64-linux-android-ar
index 8e9544d..d51cc55 100755
--- a/bin/aarch64-linux-android-ar
+++ b/bin/aarch64-linux-android-ar
Binary files differ
diff --git a/bin/aarch64-linux-android-as b/bin/aarch64-linux-android-as
index b63e0e9..dba8c70 100755
--- a/bin/aarch64-linux-android-as
+++ b/bin/aarch64-linux-android-as
Binary files differ
diff --git a/bin/aarch64-linux-android-c++filt b/bin/aarch64-linux-android-c++filt
index 0d6e94e..59e920c 100755
--- a/bin/aarch64-linux-android-c++filt
+++ b/bin/aarch64-linux-android-c++filt
Binary files differ
diff --git a/bin/aarch64-linux-android-cpp b/bin/aarch64-linux-android-cpp
index eb34052..6a68d8f 100755
--- a/bin/aarch64-linux-android-cpp
+++ b/bin/aarch64-linux-android-cpp
Binary files differ
diff --git a/bin/aarch64-linux-android-dwp b/bin/aarch64-linux-android-dwp
index 6b8894d..dff31b7 100755
--- a/bin/aarch64-linux-android-dwp
+++ b/bin/aarch64-linux-android-dwp
Binary files differ
diff --git a/bin/aarch64-linux-android-elfedit b/bin/aarch64-linux-android-elfedit
index 3a22f8c..7017d46 100755
--- a/bin/aarch64-linux-android-elfedit
+++ b/bin/aarch64-linux-android-elfedit
Binary files differ
diff --git a/bin/aarch64-linux-android-gcc-6.3.0 b/bin/aarch64-linux-android-gcc-6.3.1
index d6b1a3c..5608486 100755
--- a/bin/aarch64-linux-android-gcc-6.3.0
+++ b/bin/aarch64-linux-android-gcc-6.3.1
Binary files differ
diff --git a/bin/aarch64-linux-android-gcc-ar b/bin/aarch64-linux-android-gcc-ar
index f6ff4ec..7105b1b 100755
--- a/bin/aarch64-linux-android-gcc-ar
+++ b/bin/aarch64-linux-android-gcc-ar
Binary files differ
diff --git a/bin/aarch64-linux-android-gcc-nm b/bin/aarch64-linux-android-gcc-nm
index 5fc8459..c5cc21a 100755
--- a/bin/aarch64-linux-android-gcc-nm
+++ b/bin/aarch64-linux-android-gcc-nm
Binary files differ
diff --git a/bin/aarch64-linux-android-gcc-ranlib b/bin/aarch64-linux-android-gcc-ranlib
index 26fc884..9ae4d3f 100755
--- a/bin/aarch64-linux-android-gcc-ranlib
+++ b/bin/aarch64-linux-android-gcc-ranlib
Binary files differ
diff --git a/bin/aarch64-linux-android-gcov b/bin/aarch64-linux-android-gcov
index 61582b9..978be3e 100755
--- a/bin/aarch64-linux-android-gcov
+++ b/bin/aarch64-linux-android-gcov
Binary files differ
diff --git a/bin/aarch64-linux-android-gcov-tool b/bin/aarch64-linux-android-gcov-tool
index afea644..764afbd 100755
--- a/bin/aarch64-linux-android-gcov-tool
+++ b/bin/aarch64-linux-android-gcov-tool
Binary files differ
diff --git a/bin/aarch64-linux-android-gfortran b/bin/aarch64-linux-android-gfortran
index 191b7aa..a00b4dd 100755
--- a/bin/aarch64-linux-android-gfortran
+++ b/bin/aarch64-linux-android-gfortran
Binary files differ
diff --git a/bin/aarch64-linux-android-gprof b/bin/aarch64-linux-android-gprof
index af8cdec..6a78122 100755
--- a/bin/aarch64-linux-android-gprof
+++ b/bin/aarch64-linux-android-gprof
Binary files differ
diff --git a/bin/aarch64-linux-android-ld.bfd b/bin/aarch64-linux-android-ld.bfd
index 398ad30..4a3acde 100755
--- a/bin/aarch64-linux-android-ld.bfd
+++ b/bin/aarch64-linux-android-ld.bfd
Binary files differ
diff --git a/bin/aarch64-linux-android-ld.gold b/bin/aarch64-linux-android-ld.gold
index 9134a9b..5499068 100755
--- a/bin/aarch64-linux-android-ld.gold
+++ b/bin/aarch64-linux-android-ld.gold
Binary files differ
diff --git a/bin/aarch64-linux-android-nm b/bin/aarch64-linux-android-nm
index 09bbd6c..8b7a1e3 100755
--- a/bin/aarch64-linux-android-nm
+++ b/bin/aarch64-linux-android-nm
Binary files differ
diff --git a/bin/aarch64-linux-android-objcopy b/bin/aarch64-linux-android-objcopy
index f2c9816..5f569b6 100755
--- a/bin/aarch64-linux-android-objcopy
+++ b/bin/aarch64-linux-android-objcopy
Binary files differ
diff --git a/bin/aarch64-linux-android-objdump b/bin/aarch64-linux-android-objdump
index bd383e3..e52d67a 100755
--- a/bin/aarch64-linux-android-objdump
+++ b/bin/aarch64-linux-android-objdump
Binary files differ
diff --git a/bin/aarch64-linux-android-ranlib b/bin/aarch64-linux-android-ranlib
index 78fda8a..6045478 100755
--- a/bin/aarch64-linux-android-ranlib
+++ b/bin/aarch64-linux-android-ranlib
Binary files differ
diff --git a/bin/aarch64-linux-android-readelf b/bin/aarch64-linux-android-readelf
index 90edd5e..e1de173 100755
--- a/bin/aarch64-linux-android-readelf
+++ b/bin/aarch64-linux-android-readelf
Binary files differ
diff --git a/bin/aarch64-linux-android-size b/bin/aarch64-linux-android-size
index 4766e8d..b22fa2c 100755
--- a/bin/aarch64-linux-android-size
+++ b/bin/aarch64-linux-android-size
Binary files differ
diff --git a/bin/aarch64-linux-android-strings b/bin/aarch64-linux-android-strings
index f0ecd94..bb47b10 100755
--- a/bin/aarch64-linux-android-strings
+++ b/bin/aarch64-linux-android-strings
Binary files differ
diff --git a/bin/aarch64-linux-android-strip b/bin/aarch64-linux-android-strip
index 395e8dd..3cde8bd 100755
--- a/bin/aarch64-linux-android-strip
+++ b/bin/aarch64-linux-android-strip
Binary files differ
diff --git a/bin/real-aarch64-linux-android-g++ b/bin/real-aarch64-linux-android-g++
index d5ea543..2234c21 100755
--- a/bin/real-aarch64-linux-android-g++
+++ b/bin/real-aarch64-linux-android-g++
Binary files differ
diff --git a/bin/real-aarch64-linux-android-gcc b/bin/real-aarch64-linux-android-gcc
index d6b1a3c..5608486 100755
--- a/bin/real-aarch64-linux-android-gcc
+++ b/bin/real-aarch64-linux-android-gcc
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/crtbegin.o b/lib/gcc/aarch64-linux-android/6.3.0/crtbegin.o
deleted file mode 100644
index 1b78cb0..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/crtbegin.o
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/crtbeginS.o b/lib/gcc/aarch64-linux-android/6.3.0/crtbeginS.o
deleted file mode 100644
index 57e5409..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/crtbeginS.o
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/crtbeginT.o b/lib/gcc/aarch64-linux-android/6.3.0/crtbeginT.o
deleted file mode 100644
index 1b78cb0..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/crtbeginT.o
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/crtend.o b/lib/gcc/aarch64-linux-android/6.3.0/crtend.o
deleted file mode 100644
index dc53c33..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/crtend.o
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/crtendS.o b/lib/gcc/aarch64-linux-android/6.3.0/crtendS.o
deleted file mode 100644
index dc53c33..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/crtendS.o
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/crtfastmath.o b/lib/gcc/aarch64-linux-android/6.3.0/crtfastmath.o
deleted file mode 100644
index e9b2d49..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/crtfastmath.o
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/libcaf_single.a b/lib/gcc/aarch64-linux-android/6.3.0/libcaf_single.a
deleted file mode 100644
index b26fab8..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/libcaf_single.a
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/libgcc.a b/lib/gcc/aarch64-linux-android/6.3.0/libgcc.a
deleted file mode 100644
index f7cc2c6..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/libgcc.a
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/libgcov.a b/lib/gcc/aarch64-linux-android/6.3.0/libgcov.a
deleted file mode 100644
index 58ee40e..0000000
--- a/lib/gcc/aarch64-linux-android/6.3.0/libgcov.a
+++ /dev/null
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/crtbegin.o b/lib/gcc/aarch64-linux-android/6.3.1/crtbegin.o
new file mode 100644
index 0000000..8409cb2
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/crtbegin.o
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/crtbeginS.o b/lib/gcc/aarch64-linux-android/6.3.1/crtbeginS.o
new file mode 100644
index 0000000..7981e1d
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/crtbeginS.o
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/crtbeginT.o b/lib/gcc/aarch64-linux-android/6.3.1/crtbeginT.o
new file mode 100644
index 0000000..8409cb2
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/crtbeginT.o
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/crtend.o b/lib/gcc/aarch64-linux-android/6.3.1/crtend.o
new file mode 100644
index 0000000..991edf8
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/crtend.o
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/crtendS.o b/lib/gcc/aarch64-linux-android/6.3.1/crtendS.o
new file mode 100644
index 0000000..991edf8
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/crtendS.o
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/crtfastmath.o b/lib/gcc/aarch64-linux-android/6.3.1/crtfastmath.o
new file mode 100644
index 0000000..4871f9d
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/crtfastmath.o
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_arithmetic.mod b/lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_arithmetic.mod
index 04f07de..04f07de 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_arithmetic.mod
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_arithmetic.mod
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_exceptions.mod b/lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_exceptions.mod
index 2d5afb1..2d5afb1 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_exceptions.mod
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_exceptions.mod
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_features.mod b/lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_features.mod
index a8f96c8..a8f96c8 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/ieee_features.mod
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/ieee_features.mod
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.f90 b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.f90
index 617953f..617953f 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.f90
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.f90
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.h b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.h
index a3fbae1..a3fbae1 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.mod b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.mod
index 75d9e11..75d9e11 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib.mod
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib.mod
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib_kinds.mod b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib_kinds.mod
index dcd1d33..dcd1d33 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/omp_lib_kinds.mod
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/omp_lib_kinds.mod
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc.f90 b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc.f90
index 4b71489..4b71489 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc.f90
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc.f90
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc.mod b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc.mod
index 65cdce5..65cdce5 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc.mod
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc.mod
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc_kinds.mod b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc_kinds.mod
index 5f1934c..5f1934c 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc_kinds.mod
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc_kinds.mod
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc_lib.h b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc_lib.h
index a3f94d7..a3f94d7 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/finclude/openacc_lib.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/finclude/openacc_lib.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/README b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/README
index 7086a77..7086a77 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/README
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/README
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/limits.h b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/limits.h
index 27e5369..27e5369 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/limits.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/limits.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/linux/a.out.h b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/linux/a.out.h
index 4a023d3..4b707f5 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/linux/a.out.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/linux/a.out.h
@@ -2,7 +2,7 @@
It has been auto-edited by fixincludes from:
- "/tmp/32882bbc6f4a79c33d52361d8217b885/sysroot/usr/include/linux/a.out.h"
+ "/tmp/e0c13069eac71edbeaff7853acb9c12f/sysroot/usr/include/linux/a.out.h"
This had to be done to correct non-standard usages in the
original, manufacturer supplied header file. */
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/syslimits.h b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/syslimits.h
index a362802..a362802 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include-fixed/syslimits.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include-fixed/syslimits.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/arm_acle.h b/lib/gcc/aarch64-linux-android/6.3.1/include/arm_acle.h
index 3f85d52..3f85d52 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/arm_acle.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/arm_acle.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/include/arm_fp16.h b/lib/gcc/aarch64-linux-android/6.3.1/include/arm_fp16.h
new file mode 100644
index 0000000..4b7c2dd
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/arm_fp16.h
@@ -0,0 +1,579 @@
+/* ARM FP16 scalar intrinsics include file.
+
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _AARCH64_FP16_H_
+#define _AARCH64_FP16_H_
+
+#include <stdint.h>
+
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+fp16")
+
+typedef __fp16 float16_t;
+
+/* ARMv8.2-A FP16 one operand scalar intrinsics. */
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vabsh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_abshf (__a);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vceqzh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_cmeqhf_uss (__a, 0.0f);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcgezh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_cmgehf_uss (__a, 0.0f);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcgtzh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_cmgthf_uss (__a, 0.0f);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vclezh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_cmlehf_uss (__a, 0.0f);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcltzh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_cmlthf_uss (__a, 0.0f);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_s16 (int16_t __a)
+{
+ return __builtin_aarch64_floathihf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_s32 (int32_t __a)
+{
+ return __builtin_aarch64_floatsihf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_s64 (int64_t __a)
+{
+ return __builtin_aarch64_floatdihf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_u16 (uint16_t __a)
+{
+ return __builtin_aarch64_floatunshihf_us (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_u32 (uint32_t __a)
+{
+ return __builtin_aarch64_floatunssihf_us (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_f16_u64 (uint64_t __a)
+{
+ return __builtin_aarch64_floatunsdihf_us (__a);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vcvth_s16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_fix_trunchfhi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvth_s32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_fix_trunchfsi (__a);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvth_s64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_fix_trunchfdi (__a);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcvth_u16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_fixuns_trunchfhi_us (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvth_u32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_fixuns_trunchfsi_us (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvth_u64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_fixuns_trunchfdi_us (__a);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vcvtah_s16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lroundhfhi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtah_s32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lroundhfsi (__a);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtah_s64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lroundhfdi (__a);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcvtah_u16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lrounduhfhi_us (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtah_u32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lrounduhfsi_us (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtah_u64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lrounduhfdi_us (__a);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vcvtmh_s16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfloorhfhi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtmh_s32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfloorhfsi (__a);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtmh_s64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfloorhfdi (__a);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcvtmh_u16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lflooruhfhi_us (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtmh_u32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lflooruhfsi_us (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtmh_u64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lflooruhfdi_us (__a);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vcvtnh_s16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfrintnhfhi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtnh_s32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfrintnhfsi (__a);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtnh_s64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfrintnhfdi (__a);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcvtnh_u16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfrintnuhfhi_us (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtnh_u32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfrintnuhfsi_us (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtnh_u64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lfrintnuhfdi_us (__a);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vcvtph_s16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lceilhfhi (__a);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvtph_s32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lceilhfsi (__a);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvtph_s64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lceilhfdi (__a);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcvtph_u16_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lceiluhfhi_us (__a);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvtph_u32_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lceiluhfsi_us (__a);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvtph_u64_f16 (float16_t __a)
+{
+ return __builtin_aarch64_lceiluhfdi_us (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vnegh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_neghf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrecpeh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_frecpehf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrecpxh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_frecpxhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_btrunchf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndah_f16 (float16_t __a)
+{
+ return __builtin_aarch64_roundhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndih_f16 (float16_t __a)
+{
+ return __builtin_aarch64_nearbyinthf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndmh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_floorhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndnh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_frintnhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndph_f16 (float16_t __a)
+{
+ return __builtin_aarch64_ceilhf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrndxh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_rinthf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrsqrteh_f16 (float16_t __a)
+{
+ return __builtin_aarch64_rsqrtehf (__a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vsqrth_f16 (float16_t __a)
+{
+ return __builtin_aarch64_sqrthf (__a);
+}
+
+/* ARMv8.2-A FP16 two operands scalar intrinsics. */
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vaddh_f16 (float16_t __a, float16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vabdh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_fabdhf (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcageh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_facgehf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcagth_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_facgthf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcaleh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_faclehf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcalth_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_faclthf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vceqh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_cmeqhf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcgeh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_cmgehf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcgth_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_cmgthf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcleh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_cmlehf_uss (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vclth_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_cmlthf_uss (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_s16 (int16_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfhi (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_s32 (int32_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfsihf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_s64 (int64_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfdihf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_u16 (uint16_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfhi_sus (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_u32 (uint32_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfsihf_sus (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vcvth_n_f16_u64 (uint64_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfdihf_sus (__a, __b);
+}
+
+__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+vcvth_n_s16_f16 (float16_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzshf (__a, __b);
+}
+
+__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+vcvth_n_s32_f16 (float16_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzshfsi (__a, __b);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+vcvth_n_s64_f16 (float16_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzshfdi (__a, __b);
+}
+
+__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+vcvth_n_u16_f16 (float16_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuhf_uss (__a, __b);
+}
+
+__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+vcvth_n_u32_f16 (float16_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuhfsi_uss (__a, __b);
+}
+
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+vcvth_n_u64_f16 (float16_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuhfdi_uss (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vdivh_f16 (float16_t __a, float16_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vmaxh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_fmaxhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vmaxnmh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_fmaxhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vminh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_fminhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vminnmh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_fminhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vmulh_f16 (float16_t __a, float16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vmulxh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_fmulxhf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrecpsh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_frecpshf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vrsqrtsh_f16 (float16_t __a, float16_t __b)
+{
+ return __builtin_aarch64_rsqrtshf (__a, __b);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vsubh_f16 (float16_t __a, float16_t __b)
+{
+ return __a - __b;
+}
+
+/* ARMv8.2-A FP16 three operands scalar intrinsics. */
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vfmah_f16 (float16_t __a, float16_t __b, float16_t __c)
+{
+ return __builtin_aarch64_fmahf (__b, __c, __a);
+}
+
+__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+vfmsh_f16 (float16_t __a, float16_t __b, float16_t __c)
+{
+ return __builtin_aarch64_fnmahf (__b, __c, __a);
+}
+
+#pragma GCC pop_options
+
+#endif
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/arm_neon.h b/lib/gcc/aarch64-linux-android/6.3.1/include/arm_neon.h
index ec54368..b846644 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/arm_neon.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/arm_neon.h
@@ -58,6 +58,7 @@ typedef __Float64x2_t float64x2_t;
typedef __Poly8x16_t poly8x16_t;
typedef __Poly16x8_t poly16x8_t;
typedef __Poly64x2_t poly64x2_t;
+typedef __Poly64x1_t poly64x1_t;
typedef __Uint8x16_t uint8x16_t;
typedef __Uint16x8_t uint16x8_t;
typedef __Uint32x4_t uint32x4_t;
@@ -202,6 +203,36 @@ typedef struct poly16x8x2_t
poly16x8_t val[2];
} poly16x8x2_t;
+typedef struct poly64x1x2_t
+{
+ poly64x1_t val[2];
+} poly64x1x2_t;
+
+typedef struct poly64x1x3_t
+{
+ poly64x1_t val[3];
+} poly64x1x3_t;
+
+typedef struct poly64x1x4_t
+{
+ poly64x1_t val[4];
+} poly64x1x4_t;
+
+typedef struct poly64x2x2_t
+{
+ poly64x2_t val[2];
+} poly64x2x2_t;
+
+typedef struct poly64x2x3_t
+{
+ poly64x2_t val[3];
+} poly64x2x3_t;
+
+typedef struct poly64x2x4_t
+{
+ poly64x2_t val[4];
+} poly64x2x4_t;
+
typedef struct int8x8x3_t
{
int8x8_t val[3];
@@ -466,6 +497,8 @@ typedef struct poly16x8x4_t
#define __aarch64_vdup_lane_any(__size, __q, __a, __b) \
vdup##__q##_n_##__size (__aarch64_vget_lane_any (__a, __b))
+#define __aarch64_vdup_lane_f16(__a, __b) \
+ __aarch64_vdup_lane_any (f16, , __a, __b)
#define __aarch64_vdup_lane_f32(__a, __b) \
__aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_lane_f64(__a, __b) \
@@ -474,6 +507,8 @@ typedef struct poly16x8x4_t
__aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_lane_p16(__a, __b) \
__aarch64_vdup_lane_any (p16, , __a, __b)
+#define __aarch64_vdup_lane_p64(__a, __b) \
+ __aarch64_vdup_lane_any (p64, , __a, __b)
#define __aarch64_vdup_lane_s8(__a, __b) \
__aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_lane_s16(__a, __b) \
@@ -492,6 +527,8 @@ typedef struct poly16x8x4_t
__aarch64_vdup_lane_any (u64, , __a, __b)
/* __aarch64_vdup_laneq internal macros. */
+#define __aarch64_vdup_laneq_f16(__a, __b) \
+ __aarch64_vdup_lane_any (f16, , __a, __b)
#define __aarch64_vdup_laneq_f32(__a, __b) \
__aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_laneq_f64(__a, __b) \
@@ -500,6 +537,8 @@ typedef struct poly16x8x4_t
__aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_laneq_p16(__a, __b) \
__aarch64_vdup_lane_any (p16, , __a, __b)
+#define __aarch64_vdup_laneq_p64(__a, __b) \
+ __aarch64_vdup_lane_any (p64, , __a, __b)
#define __aarch64_vdup_laneq_s8(__a, __b) \
__aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_laneq_s16(__a, __b) \
@@ -518,6 +557,8 @@ typedef struct poly16x8x4_t
__aarch64_vdup_lane_any (u64, , __a, __b)
/* __aarch64_vdupq_lane internal macros. */
+#define __aarch64_vdupq_lane_f16(__a, __b) \
+ __aarch64_vdup_lane_any (f16, q, __a, __b)
#define __aarch64_vdupq_lane_f32(__a, __b) \
__aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_lane_f64(__a, __b) \
@@ -526,6 +567,8 @@ typedef struct poly16x8x4_t
__aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_lane_p16(__a, __b) \
__aarch64_vdup_lane_any (p16, q, __a, __b)
+#define __aarch64_vdupq_lane_p64(__a, __b) \
+ __aarch64_vdup_lane_any (p64, q, __a, __b)
#define __aarch64_vdupq_lane_s8(__a, __b) \
__aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_lane_s16(__a, __b) \
@@ -544,6 +587,8 @@ typedef struct poly16x8x4_t
__aarch64_vdup_lane_any (u64, q, __a, __b)
/* __aarch64_vdupq_laneq internal macros. */
+#define __aarch64_vdupq_laneq_f16(__a, __b) \
+ __aarch64_vdup_lane_any (f16, q, __a, __b)
#define __aarch64_vdupq_laneq_f32(__a, __b) \
__aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_laneq_f64(__a, __b) \
@@ -552,6 +597,8 @@ typedef struct poly16x8x4_t
__aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_laneq_p16(__a, __b) \
__aarch64_vdup_lane_any (p16, q, __a, __b)
+#define __aarch64_vdupq_laneq_p64(__a, __b) \
+ __aarch64_vdup_lane_any (p64, q, __a, __b)
#define __aarch64_vdupq_laneq_s8(__a, __b) \
__aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_laneq_s16(__a, __b) \
@@ -601,535 +648,619 @@ typedef struct poly16x8x4_t
})
/* vadd */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s8 (int8x8_t __a, int8x8_t __b)
{
return __a + __b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s16 (int16x4_t __a, int16x4_t __b)
{
return __a + __b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s32 (int32x2_t __a, int32x2_t __b)
{
return __a + __b;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_f32 (float32x2_t __a, float32x2_t __b)
{
return __a + __b;
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_f64 (float64x1_t __a, float64x1_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a + __b;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s64 (int64x1_t __a, int64x1_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u64 (uint64x1_t __a, uint64x1_t __b)
{
return __a + __b;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a + __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a + __b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a + __b;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s64 (int64x2_t __a, int64x2_t __b)
{
return __a + __b;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_f32 (float32x4_t __a, float32x4_t __b)
{
return __a + __b;
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_f64 (float64x2_t __a, float64x2_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __a + __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s8 (int8x8_t __a, int8x8_t __b)
{
return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s16 (int16x4_t __a, int16x4_t __b)
{
return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s32 (int32x2_t __a, int32x2_t __b)
{
return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_s8 (int8x16_t __a, int8x16_t __b)
{
return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_s16 (int16x8_t __a, int16x8_t __b)
{
return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_s32 (int32x4_t __a, int32x4_t __b)
{
return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s8 (int16x8_t __a, int8x8_t __b)
{
return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s16 (int32x4_t __a, int16x4_t __b)
{
return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s32 (int64x2_t __a, int32x2_t __b)
{
return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
{
return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_s8 (int16x8_t __a, int8x16_t __b)
{
return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_s16 (int32x4_t __a, int16x8_t __b)
{
return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_s32 (int64x2_t __a, int32x4_t __b)
{
return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b)
{
return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b)
{
return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b)
{
return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s8 (int8x8_t __a, int8x8_t __b)
{
return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s8 (int8x8_t __a, int8x8_t __b)
{
return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s16 (int16x8_t __a, int16x8_t __b)
{
return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s32 (int32x4_t __a, int32x4_t __b)
{
return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s64 (int64x2_t __a, int64x2_t __b)
{
return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a,
(int64x2_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s16 (int16x8_t __a, int16x8_t __b)
{
return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s32 (int32x4_t __a, int32x4_t __b)
{
return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s64 (int64x2_t __a, int64x2_t __b)
{
return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a,
(int64x2_t) __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a,
@@ -1137,7 +1268,8 @@ vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
(int16x8_t) __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a,
@@ -1145,7 +1277,8 @@ vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
(int32x4_t) __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a,
@@ -1153,25 +1286,29 @@ vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
(int64x2_t) __c);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a,
@@ -1179,7 +1316,8 @@ vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
(int16x8_t) __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a,
@@ -1187,7 +1325,8 @@ vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
(int32x4_t) __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a,
@@ -1195,1101 +1334,1280 @@ vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
(int64x2_t) __c);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdiv_f32 (float32x2_t __a, float32x2_t __b)
{
return __a / __b;
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdiv_f64 (float64x1_t __a, float64x1_t __b)
{
return __a / __b;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdivq_f32 (float32x4_t __a, float32x4_t __b)
{
return __a / __b;
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdivq_f64 (float64x2_t __a, float64x2_t __b)
{
return __a / __b;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_s8 (int8x8_t __a, int8x8_t __b)
{
return __a * __b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_s16 (int16x4_t __a, int16x4_t __b)
{
return __a * __b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_s32 (int32x2_t __a, int32x2_t __b)
{
return __a * __b;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_f32 (float32x2_t __a, float32x2_t __b)
{
return __a * __b;
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_f64 (float64x1_t __a, float64x1_t __b)
{
return __a * __b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a * __b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a * __b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a * __b;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_p8 (poly8x8_t __a, poly8x8_t __b)
{
return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a * __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a * __b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a * __b;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_f32 (float32x4_t __a, float32x4_t __b)
{
return __a * __b;
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_f64 (float64x2_t __a, float64x2_t __b)
{
return __a * __b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a * __b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a * __b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a * __b;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
{
return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s8 (int8x8_t __a, int8x8_t __b)
{
return __a & __b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s16 (int16x4_t __a, int16x4_t __b)
{
return __a & __b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s32 (int32x2_t __a, int32x2_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a & __b;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s64 (int64x1_t __a, int64x1_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u64 (uint64x1_t __a, uint64x1_t __b)
{
return __a & __b;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a & __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a & __b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a & __b;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s64 (int64x2_t __a, int64x2_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a & __b;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __a & __b;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s8 (int8x8_t __a, int8x8_t __b)
{
return __a | __b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s16 (int16x4_t __a, int16x4_t __b)
{
return __a | __b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s32 (int32x2_t __a, int32x2_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a | __b;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s64 (int64x1_t __a, int64x1_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u64 (uint64x1_t __a, uint64x1_t __b)
{
return __a | __b;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a | __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a | __b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a | __b;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s64 (int64x2_t __a, int64x2_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a | __b;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __a | __b;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s8 (int8x8_t __a, int8x8_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s16 (int16x4_t __a, int16x4_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s32 (int32x2_t __a, int32x2_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s64 (int64x1_t __a, int64x1_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u64 (uint64x1_t __a, uint64x1_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s64 (int64x2_t __a, int64x2_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __a ^ __b;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s8 (int8x8_t __a, int8x8_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s16 (int16x4_t __a, int16x4_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s32 (int32x2_t __a, int32x2_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s64 (int64x1_t __a, int64x1_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u64 (uint64x1_t __a, uint64x1_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s64 (int64x2_t __a, int64x2_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __a & ~__b;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s8 (int8x8_t __a, int8x8_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s16 (int16x4_t __a, int16x4_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s32 (int32x2_t __a, int32x2_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s64 (int64x1_t __a, int64x1_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u64 (uint64x1_t __a, uint64x1_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s64 (int64x2_t __a, int64x2_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __a | ~__b;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s8 (int8x8_t __a, int8x8_t __b)
{
return __a - __b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s16 (int16x4_t __a, int16x4_t __b)
{
return __a - __b;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s32 (int32x2_t __a, int32x2_t __b)
{
return __a - __b;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_f32 (float32x2_t __a, float32x2_t __b)
{
return __a - __b;
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_f64 (float64x1_t __a, float64x1_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __a - __b;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s64 (int64x1_t __a, int64x1_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u64 (uint64x1_t __a, uint64x1_t __b)
{
return __a - __b;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s8 (int8x16_t __a, int8x16_t __b)
{
return __a - __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s16 (int16x8_t __a, int16x8_t __b)
{
return __a - __b;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s32 (int32x4_t __a, int32x4_t __b)
{
return __a - __b;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s64 (int64x2_t __a, int64x2_t __b)
{
return __a - __b;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_f32 (float32x4_t __a, float32x4_t __b)
{
return __a - __b;
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_f64 (float64x2_t __a, float64x2_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __a - __b;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s8 (int8x8_t __a, int8x8_t __b)
{
return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s16 (int16x4_t __a, int16x4_t __b)
{
return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s32 (int32x2_t __a, int32x2_t __b)
{
return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_s8 (int8x16_t __a, int8x16_t __b)
{
return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_s16 (int16x8_t __a, int16x8_t __b)
{
return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_s32 (int32x4_t __a, int32x4_t __b)
{
return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s8 (int16x8_t __a, int8x8_t __b)
{
return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s16 (int32x4_t __a, int16x4_t __b)
{
return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s32 (int64x2_t __a, int32x2_t __b)
{
return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
{
return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_s8 (int16x8_t __a, int8x16_t __b)
{
return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_s16 (int32x4_t __a, int16x8_t __b)
{
return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_s32 (int64x2_t __a, int32x4_t __b)
{
return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b)
{
return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b)
{
return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b)
{
return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s8 (int8x8_t __a, int8x8_t __b)
{
return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s64 (int64x1_t __a, int64x1_t __b)
{
return (int64x1_t) {__builtin_aarch64_sqadddi (__a[0], __b[0])};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __builtin_aarch64_uqaddv8qi_uuu (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s8 (int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_aarch64_shsubv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_shsubv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_shsubv2si (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_uhsubv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_uhsubv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_uhsubv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s8 (int8x16_t __a, int8x16_t __b)
{
return (int8x16_t) __builtin_aarch64_shsubv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_shsubv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_shsubv4si (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t) __builtin_aarch64_uhsubv16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_uhsubv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_uhsubv4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s16 (int16x8_t __a, int16x8_t __b)
{
return (int8x8_t) __builtin_aarch64_subhnv8hi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s32 (int32x4_t __a, int32x4_t __b)
{
return (int16x4_t) __builtin_aarch64_subhnv4si (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s64 (int64x2_t __a, int64x2_t __b)
{
return (int32x2_t) __builtin_aarch64_subhnv2di (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_subhnv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_subhnv4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_subhnv2di ((int64x2_t) __a,
(int64x2_t) __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
{
return (int8x8_t) __builtin_aarch64_rsubhnv8hi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
{
return (int16x4_t) __builtin_aarch64_rsubhnv4si (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
{
return (int32x2_t) __builtin_aarch64_rsubhnv2di (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_rsubhnv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_rsubhnv4si ((int32x4_t) __a,
(int32x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_rsubhnv2di ((int64x2_t) __a,
(int64x2_t) __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int8x16_t) __builtin_aarch64_rsubhn2v8hi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int16x8_t) __builtin_aarch64_rsubhn2v4si (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
return (int32x4_t) __builtin_aarch64_rsubhn2v2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
return (uint8x16_t) __builtin_aarch64_rsubhn2v8hi ((int8x8_t) __a,
@@ -2297,7 +2615,8 @@ vrsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
(int16x8_t) __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
return (uint16x8_t) __builtin_aarch64_rsubhn2v4si ((int16x4_t) __a,
@@ -2305,7 +2624,8 @@ vrsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
(int32x4_t) __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
return (uint32x4_t) __builtin_aarch64_rsubhn2v2di ((int32x2_t) __a,
@@ -2313,25 +2633,29 @@ vrsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
(int64x2_t) __c);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int8x16_t) __builtin_aarch64_subhn2v8hi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int16x8_t) __builtin_aarch64_subhn2v4si (__a, __b, __c);;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
return (int32x4_t) __builtin_aarch64_subhn2v2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
return (uint8x16_t) __builtin_aarch64_subhn2v8hi ((int8x8_t) __a,
@@ -2339,7 +2663,8 @@ vsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
(int16x8_t) __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
return (uint16x8_t) __builtin_aarch64_subhn2v4si ((int16x4_t) __a,
@@ -2347,7 +2672,8 @@ vsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
(int32x4_t) __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
return (uint32x4_t) __builtin_aarch64_subhn2v2di ((int32x2_t) __a,
@@ -2355,453 +2681,542 @@ vsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
(int64x2_t) __c);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __builtin_aarch64_uqaddv4hi_uuu (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __builtin_aarch64_uqaddv2si_uuu (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (uint64x1_t) {__builtin_aarch64_uqadddi_uuu (__a[0], __b[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s8 (int8x16_t __a, int8x16_t __b)
{
return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s64 (int64x2_t __a, int64x2_t __b)
{
return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __builtin_aarch64_uqaddv16qi_uuu (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __builtin_aarch64_uqaddv8hi_uuu (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __builtin_aarch64_uqaddv4si_uuu (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __builtin_aarch64_uqaddv2di_uuu (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s8 (int8x8_t __a, int8x8_t __b)
{
return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s64 (int64x1_t __a, int64x1_t __b)
{
return (int64x1_t) {__builtin_aarch64_sqsubdi (__a[0], __b[0])};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
return __builtin_aarch64_uqsubv8qi_uuu (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
return __builtin_aarch64_uqsubv4hi_uuu (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
return __builtin_aarch64_uqsubv2si_uuu (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (uint64x1_t) {__builtin_aarch64_uqsubdi_uuu (__a[0], __b[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s8 (int8x16_t __a, int8x16_t __b)
{
return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s64 (int64x2_t __a, int64x2_t __b)
{
return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return __builtin_aarch64_uqsubv16qi_uuu (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return __builtin_aarch64_uqsubv8hi_uuu (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return __builtin_aarch64_uqsubv4si_uuu (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return __builtin_aarch64_uqsubv2di_uuu (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s8 (int8x8_t __a)
{
return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s16 (int16x4_t __a)
{
return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s32 (int32x2_t __a)
{
return (int32x2_t) __builtin_aarch64_sqnegv2si (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s64 (int64x1_t __a)
{
return (int64x1_t) {__builtin_aarch64_sqnegdi (__a[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s8 (int8x16_t __a)
{
return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s16 (int16x8_t __a)
{
return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s32 (int32x4_t __a)
{
return (int32x4_t) __builtin_aarch64_sqnegv4si (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s8 (int8x8_t __a)
{
return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s16 (int16x4_t __a)
{
return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s32 (int32x2_t __a)
{
return (int32x2_t) __builtin_aarch64_sqabsv2si (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s64 (int64x1_t __a)
{
return (int64x1_t) {__builtin_aarch64_sqabsdi (__a[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s8 (int8x16_t __a)
{
return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s16 (int16x8_t __a)
{
return (int16x8_t) __builtin_aarch64_sqabsv8hi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s32 (int32x4_t __a)
{
return (int32x4_t) __builtin_aarch64_sqabsv4si (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s8 (uint64_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s16 (uint64_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s32 (uint64_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s64 (uint64_t __a)
{
return (int64x1_t) {__a};
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f16 (uint64_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f32 (uint64_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u8 (uint64_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u16 (uint64_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u32 (uint64_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u64 (uint64_t __a)
{
return (uint64x1_t) {__a};
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f64 (uint64_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p8 (uint64_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p16 (uint64_t __a)
{
return (poly16x4_t) __a;
}
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcreate_p64 (uint64_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
/* vget_lane */
-__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_f16 (float16x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_f32 (float32x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_f64 (float64x1_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p8 (poly8x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p16 (poly16x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return __aarch64_vget_lane_any (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s8 (int8x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s16 (int16x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s32 (int32x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s64 (int64x1_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u8 (uint8x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u16 (uint16x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u32 (uint32x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u64 (uint64x1_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
@@ -2809,79 +3224,99 @@ vget_lane_u64 (uint64x1_t __a, const int __b)
/* vgetq_lane */
-__extension__ static __inline float16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_f16 (float16x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_f32 (float32x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_f64 (float64x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p8 (poly8x16_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p16 (poly16x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vgetq_lane_p64 (poly64x2_t __a, const int __b)
+{
+ return __aarch64_vget_lane_any (__a, __b);
+}
+
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s8 (int8x16_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s16 (int16x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s32 (int32x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s64 (int64x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u8 (uint8x16_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u16 (uint16x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u32 (uint32x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u64 (uint64x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
@@ -2889,1953 +3324,2832 @@ vgetq_lane_u64 (uint64x2_t __a, const int __b)
/* vreinterpret */
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f16 (float16x4_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f64 (float64x1_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s8 (int8x8_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s16 (int16x4_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s32 (int32x2_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s64 (int64x1_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f32 (float32x2_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u8 (uint8x8_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u16 (uint16x4_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u32 (uint32x2_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u64 (uint64x1_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_p16 (poly16x4_t __a)
{
return (poly8x8_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p8_p64 (poly64x1_t __a)
+{
+ return (poly8x8_t) __a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_f64 (float64x2_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s8 (int8x16_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s16 (int16x8_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s32 (int32x4_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s64 (int64x2_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_f16 (float16x8_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_f32 (float32x4_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u8 (uint8x16_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u16 (uint16x8_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u32 (uint32x4_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u64 (uint64x2_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_p16 (poly16x8_t __a)
{
return (poly8x16_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_p64 (poly64x2_t __a)
+{
+ return (poly8x16_t) __a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p8_p128 (poly128_t __a)
+{
+ return (poly8x16_t)__a;
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f16 (float16x4_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f64 (float64x1_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s8 (int8x8_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s16 (int16x4_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s32 (int32x2_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s64 (int64x1_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f32 (float32x2_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u8 (uint8x8_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u16 (uint16x4_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u32 (uint32x2_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u64 (uint64x1_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_p8 (poly8x8_t __a)
{
return (poly16x4_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p16_p64 (poly64x1_t __a)
+{
+ return (poly16x4_t) __a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_f64 (float64x2_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s8 (int8x16_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s16 (int16x8_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s32 (int32x4_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s64 (int64x2_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_f16 (float16x8_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_f32 (float32x4_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u8 (uint8x16_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u16 (uint16x8_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u32 (uint32x4_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u64 (uint64x2_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_p8 (poly8x16_t __a)
{
return (poly16x8_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_p64 (poly64x2_t __a)
+{
+ return (poly16x8_t) __a;
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p16_p128 (poly128_t __a)
+{
+ return (poly16x8_t)__a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f16 (float16x4_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f64 (float64x1_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s8 (int8x8_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s16 (int16x4_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s32 (int32x2_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_s64 (int64x1_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_f32 (float32x2_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u8 (uint8x8_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u16 (uint16x4_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u32 (uint32x2_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_u64 (uint64x1_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_p8 (poly8x8_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_p64_p16 (poly16x4_t __a)
+{
+ return (poly64x1_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f64 (float64x2_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s8 (int8x16_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s16 (int16x8_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s32 (int32x4_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_s64 (int64x2_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f16 (float16x8_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_f32 (float32x4_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_p128 (poly128_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u8 (uint8x16_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u16 (uint16x8_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_p16 (poly16x8_t __a)
+{
+ return (poly64x2_t)__a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u32 (uint32x4_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_u64 (uint64x2_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p64_p8 (poly8x16_t __a)
+{
+ return (poly64x2_t) __a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_p8 (poly8x16_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_p16 (poly16x8_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_f16 (float16x8_t __a)
+{
+ return (poly128_t) __a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_f32 (float32x4_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_p64 (poly64x2_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s64 (int64x2_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u64 (uint64x2_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s8 (int8x16_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s16 (int16x8_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_s32 (int32x4_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u8 (uint8x16_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u16 (uint16x8_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_p128_u32 (uint32x4_t __a)
+{
+ return (poly128_t)__a;
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_f64 (float64x1_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s8 (int8x8_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s16 (int16x4_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s32 (int32x2_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s64 (int64x1_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_f32 (float32x2_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u8 (uint8x8_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u16 (uint16x4_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u32 (uint32x2_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u64 (uint64x1_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p8 (poly8x8_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p16 (poly16x4_t __a)
{
return (float16x4_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f16_p64 (poly64x1_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_f64 (float64x2_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s8 (int8x16_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s16 (int16x8_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s32 (int32x4_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s64 (int64x2_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_f32 (float32x4_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u8 (uint8x16_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u16 (uint16x8_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u32 (uint32x4_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u64 (uint64x2_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p8 (poly8x16_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_p128 (poly128_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p16 (poly16x8_t __a)
{
return (float16x8_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f16_p64 (poly64x2_t __a)
+{
+ return (float16x8_t) __a;
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_f16 (float16x4_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_f64 (float64x1_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s8 (int8x8_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s16 (int16x4_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s32 (int32x2_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s64 (int64x1_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u8 (uint8x8_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u16 (uint16x4_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u32 (uint32x2_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u64 (uint64x1_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p8 (poly8x8_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p16 (poly16x4_t __a)
{
return (float32x2_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f32_p64 (poly64x1_t __a)
+{
+ return (float32x2_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_f16 (float16x8_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_f64 (float64x2_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s8 (int8x16_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s16 (int16x8_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s32 (int32x4_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s64 (int64x2_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u8 (uint8x16_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u16 (uint16x8_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u32 (uint32x4_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u64 (uint64x2_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p8 (poly8x16_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p16 (poly16x8_t __a)
{
return (float32x4_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_p64 (poly64x2_t __a)
+{
+ return (float32x4_t) __a;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f32_p128 (poly128_t __a)
+{
+ return (float32x4_t)__a;
+}
+
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_f16 (float16x4_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_f32 (float32x2_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_p8 (poly8x8_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_p16 (poly16x4_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_f64_p64 (poly64x1_t __a)
+{
+ return (float64x1_t) __a;
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s8 (int8x8_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s16 (int16x4_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s32 (int32x2_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s64 (int64x1_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u8 (uint8x8_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u16 (uint16x4_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u32 (uint32x2_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x1_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u64 (uint64x1_t __a)
{
return (float64x1_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_f16 (float16x8_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_f32 (float32x4_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_p8 (poly8x16_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_p16 (poly16x8_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_f64_p64 (poly64x2_t __a)
+{
+ return (float64x2_t) __a;
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s8 (int8x16_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s16 (int16x8_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s32 (int32x4_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s64 (int64x2_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u8 (uint8x16_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u16 (uint16x8_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u32 (uint32x4_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline float64x2_t __attribute__((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u64 (uint64x2_t __a)
{
return (float64x2_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f16 (float16x4_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f64 (float64x1_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s8 (int8x8_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s16 (int16x4_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s32 (int32x2_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f32 (float32x2_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u8 (uint8x8_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u16 (uint16x4_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u32 (uint32x2_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u64 (uint64x1_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p8 (poly8x8_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p16 (poly16x4_t __a)
{
return (int64x1_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s64_p64 (poly64x1_t __a)
+{
+ return (int64x1_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f64 (float64x2_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s8 (int8x16_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s16 (int16x8_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s32 (int32x4_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f16 (float16x8_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f32 (float32x4_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u8 (uint8x16_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u16 (uint16x8_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u32 (uint32x4_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u64 (uint64x2_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p8 (poly8x16_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p16 (poly16x8_t __a)
{
return (int64x2_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_p64 (poly64x2_t __a)
+{
+ return (int64x2_t) __a;
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s64_p128 (poly128_t __a)
+{
+ return (int64x2_t)__a;
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f16 (float16x4_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f64 (float64x1_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s8 (int8x8_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s16 (int16x4_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s32 (int32x2_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s64 (int64x1_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f32 (float32x2_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u8 (uint8x8_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u16 (uint16x4_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u32 (uint32x2_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p8 (poly8x8_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p16 (poly16x4_t __a)
{
return (uint64x1_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u64_p64 (poly64x1_t __a)
+{
+ return (uint64x1_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f64 (float64x2_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s8 (int8x16_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s16 (int16x8_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s32 (int32x4_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s64 (int64x2_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f16 (float16x8_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f32 (float32x4_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u8 (uint8x16_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u16 (uint16x8_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u32 (uint32x4_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p8 (poly8x16_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p16 (poly16x8_t __a)
{
return (uint64x2_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_p64 (poly64x2_t __a)
+{
+ return (uint64x2_t) __a;
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u64_p128 (poly128_t __a)
+{
+ return (uint64x2_t)__a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f16 (float16x4_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f64 (float64x1_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s16 (int16x4_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s32 (int32x2_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s64 (int64x1_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f32 (float32x2_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u8 (uint8x8_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u16 (uint16x4_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u32 (uint32x2_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u64 (uint64x1_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p8 (poly8x8_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p16 (poly16x4_t __a)
{
return (int8x8_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s8_p64 (poly64x1_t __a)
+{
+ return (int8x8_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f64 (float64x2_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s16 (int16x8_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s32 (int32x4_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s64 (int64x2_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f16 (float16x8_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f32 (float32x4_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u8 (uint8x16_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u16 (uint16x8_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u32 (uint32x4_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u64 (uint64x2_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p8 (poly8x16_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p16 (poly16x8_t __a)
{
return (int8x16_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_p64 (poly64x2_t __a)
+{
+ return (int8x16_t) __a;
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s8_p128 (poly128_t __a)
+{
+ return (int8x16_t)__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f16 (float16x4_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f64 (float64x1_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s8 (int8x8_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s32 (int32x2_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s64 (int64x1_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f32 (float32x2_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u8 (uint8x8_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u16 (uint16x4_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u32 (uint32x2_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u64 (uint64x1_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p8 (poly8x8_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p16 (poly16x4_t __a)
{
return (int16x4_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s16_p64 (poly64x1_t __a)
+{
+ return (int16x4_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f64 (float64x2_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s8 (int8x16_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s32 (int32x4_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s64 (int64x2_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f16 (float16x8_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f32 (float32x4_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u8 (uint8x16_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u16 (uint16x8_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u32 (uint32x4_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u64 (uint64x2_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p8 (poly8x16_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p16 (poly16x8_t __a)
{
return (int16x8_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_p64 (poly64x2_t __a)
+{
+ return (int16x8_t) __a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s16_p128 (poly128_t __a)
+{
+ return (int16x8_t)__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f16 (float16x4_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f64 (float64x1_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s8 (int8x8_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s16 (int16x4_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s64 (int64x1_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f32 (float32x2_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u8 (uint8x8_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u16 (uint16x4_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u32 (uint32x2_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u64 (uint64x1_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_p8 (poly8x8_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_p16 (poly16x4_t __a)
{
return (int32x2_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_s32_p64 (poly64x1_t __a)
+{
+ return (int32x2_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f64 (float64x2_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s8 (int8x16_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s16 (int16x8_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s64 (int64x2_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f16 (float16x8_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f32 (float32x4_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u8 (uint8x16_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u16 (uint16x8_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u32 (uint32x4_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u64 (uint64x2_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p8 (poly8x16_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p16 (poly16x8_t __a)
{
return (int32x4_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_p64 (poly64x2_t __a)
+{
+ return (int32x4_t) __a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_s32_p128 (poly128_t __a)
+{
+ return (int32x4_t)__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f16 (float16x4_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f64 (float64x1_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s8 (int8x8_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s16 (int16x4_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s32 (int32x2_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s64 (int64x1_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f32 (float32x2_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u16 (uint16x4_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u32 (uint32x2_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u64 (uint64x1_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p8 (poly8x8_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p16 (poly16x4_t __a)
{
return (uint8x8_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u8_p64 (poly64x1_t __a)
+{
+ return (uint8x8_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f64 (float64x2_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s8 (int8x16_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s16 (int16x8_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s32 (int32x4_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s64 (int64x2_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f16 (float16x8_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f32 (float32x4_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u16 (uint16x8_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u32 (uint32x4_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u64 (uint64x2_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p8 (poly8x16_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p16 (poly16x8_t __a)
{
return (uint8x16_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_p64 (poly64x2_t __a)
+{
+ return (uint8x16_t) __a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u8_p128 (poly128_t __a)
+{
+ return (uint8x16_t)__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f16 (float16x4_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f64 (float64x1_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s8 (int8x8_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s16 (int16x4_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s32 (int32x2_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s64 (int64x1_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f32 (float32x2_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u8 (uint8x8_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u32 (uint32x2_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u64 (uint64x1_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p8 (poly8x8_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p16 (poly16x4_t __a)
{
return (uint16x4_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u16_p64 (poly64x1_t __a)
+{
+ return (uint16x4_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f64 (float64x2_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s8 (int8x16_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s16 (int16x8_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s32 (int32x4_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s64 (int64x2_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f16 (float16x8_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f32 (float32x4_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u8 (uint8x16_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u32 (uint32x4_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u64 (uint64x2_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p8 (poly8x16_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p16 (poly16x8_t __a)
{
return (uint16x8_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_p64 (poly64x2_t __a)
+{
+ return (uint16x8_t) __a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u16_p128 (poly128_t __a)
+{
+ return (uint16x8_t)__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f16 (float16x4_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f64 (float64x1_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s8 (int8x8_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s16 (int16x4_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s32 (int32x2_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s64 (int64x1_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f32 (float32x2_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u8 (uint8x8_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u16 (uint16x4_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u64 (uint64x1_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p8 (poly8x8_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p16 (poly16x4_t __a)
{
return (uint32x2_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpret_u32_p64 (poly64x1_t __a)
+{
+ return (uint32x2_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f64 (float64x2_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s8 (int8x16_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s16 (int16x8_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s32 (int32x4_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s64 (int64x2_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f16 (float16x8_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f32 (float32x4_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u8 (uint8x16_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u16 (uint16x8_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u64 (uint64x2_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p8 (poly8x16_t __a)
{
return (uint32x4_t) __a;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p16 (poly16x8_t __a)
{
return (uint32x4_t) __a;
}
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_p64 (poly64x2_t __a)
+{
+ return (uint32x4_t) __a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vreinterpretq_u32_p128 (poly128_t __a)
+{
+ return (uint32x4_t)__a;
+}
+
/* vset_lane */
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_f16 (float16_t __elem, float16x4_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_f32 (float32_t __elem, float32x2_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_f64 (float64_t __elem, float64x1_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p8 (poly8_t __elem, poly8x8_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p16 (poly16_t __elem, poly16x4_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vset_lane_p64 (poly64_t __elem, poly64x1_t __vec, const int __index)
+{
+ return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s8 (int8_t __elem, int8x8_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s16 (int16_t __elem, int16x4_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s32 (int32_t __elem, int32x2_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s64 (int64_t __elem, int64x1_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u8 (uint8_t __elem, uint8x8_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u16 (uint16_t __elem, uint16x4_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u32 (uint32_t __elem, uint32x2_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u64 (uint64_t __elem, uint64x1_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
@@ -4843,79 +6157,99 @@ vset_lane_u64 (uint64_t __elem, uint64x1_t __vec, const int __index)
/* vsetq_lane */
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_f16 (float16_t __elem, float16x8_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_f32 (float32_t __elem, float32x4_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_f64 (float64_t __elem, float64x2_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p8 (poly8_t __elem, poly8x16_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsetq_lane_p64 (poly64_t __elem, poly64x2_t __vec, const int __index)
+{
+ return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s8 (int8_t __elem, int8x16_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s16 (int16_t __elem, int16x8_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s32 (int32_t __elem, int32x4_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s64 (int64_t __elem, int64x2_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u8 (uint8_t __elem, uint8x16_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u16 (uint16_t __elem, uint16x8_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u32 (uint32_t __elem, uint32x4_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u64 (uint64_t __elem, uint64x2_t __vec, const int __index)
{
return __aarch64_vset_lane_any (__elem, __vec, __index);
@@ -4926,79 +6260,99 @@ vsetq_lane_u64 (uint64_t __elem, uint64x2_t __vec, const int __index)
uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); \
return vreinterpret_##__TYPE##_u64 (lo);
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f16 (float16x8_t __a)
{
__GET_LOW (f16);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f32 (float32x4_t __a)
{
__GET_LOW (f32);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f64 (float64x2_t __a)
{
return (float64x1_t) {vgetq_lane_f64 (__a, 0)};
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p8 (poly8x16_t __a)
{
__GET_LOW (p8);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p16 (poly16x8_t __a)
{
__GET_LOW (p16);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_low_p64 (poly64x2_t __a)
+{
+ __GET_LOW (p64);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s8 (int8x16_t __a)
{
__GET_LOW (s8);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s16 (int16x8_t __a)
{
__GET_LOW (s16);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s32 (int32x4_t __a)
{
__GET_LOW (s32);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s64 (int64x2_t __a)
{
__GET_LOW (s64);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u8 (uint8x16_t __a)
{
__GET_LOW (u8);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u16 (uint16x8_t __a)
{
__GET_LOW (u16);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u32 (uint32x4_t __a)
{
__GET_LOW (u32);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u64 (uint64x2_t __a)
{
return vcreate_u64 (vgetq_lane_u64 (__a, 0));
@@ -5011,73 +6365,92 @@ vget_low_u64 (uint64x2_t __a)
uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); \
return vreinterpret_##__TYPE##_u64 (hi);
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f16 (float16x8_t __a)
{
__GET_HIGH (f16);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f32 (float32x4_t __a)
{
__GET_HIGH (f32);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f64 (float64x2_t __a)
{
__GET_HIGH (f64);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p8 (poly8x16_t __a)
{
__GET_HIGH (p8);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p16 (poly16x8_t __a)
{
__GET_HIGH (p16);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vget_high_p64 (poly64x2_t __a)
+{
+ __GET_HIGH (p64);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s8 (int8x16_t __a)
{
__GET_HIGH (s8);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s16 (int16x8_t __a)
{
__GET_HIGH (s16);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s32 (int32x4_t __a)
{
__GET_HIGH (s32);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s64 (int64x2_t __a)
{
__GET_HIGH (s64);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u8 (uint8x16_t __a)
{
__GET_HIGH (u8);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u16 (uint16x8_t __a)
{
__GET_HIGH (u16);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u32 (uint32x4_t __a)
{
__GET_HIGH (u32);
@@ -5085,98 +6458,120 @@ vget_high_u32 (uint32x4_t __a)
#undef __GET_HIGH
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u64 (uint64x2_t __a)
{
return vcreate_u64 (vgetq_lane_u64 (__a, 1));
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s8 (int8x8_t __a, int8x8_t __b)
{
return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s64 (int64x1_t __a, int64x1_t __b)
{
return __builtin_aarch64_combinedi (__a[0], __b[0]);
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f16 (float16x4_t __a, float16x4_t __b)
{
return __builtin_aarch64_combinev4hf (__a, __b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f32 (float32x2_t __a, float32x2_t __b)
{
return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (uint64x2_t) __builtin_aarch64_combinedi (__a[0], __b[0]);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f64 (float64x1_t __a, float64x1_t __b)
{
return __builtin_aarch64_combinedf (__a[0], __b[0]);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
{
return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
{
return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ return (poly64x2_t) __builtin_aarch64_combinedi_ppp (__a[0], __b[0]);
+}
+
/* Start of temporary inline asm implementations. */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
{
int8x8_t result;
@@ -5187,7 +6582,8 @@ vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
{
int16x4_t result;
@@ -5198,7 +6594,8 @@ vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
{
int32x2_t result;
@@ -5209,7 +6606,8 @@ vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
{
uint8x8_t result;
@@ -5220,7 +6618,8 @@ vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
{
uint16x4_t result;
@@ -5231,7 +6630,8 @@ vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
{
uint32x2_t result;
@@ -5242,7 +6642,8 @@ vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
{
int16x8_t result;
@@ -5253,7 +6654,8 @@ vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
{
int32x4_t result;
@@ -5264,7 +6666,8 @@ vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
{
int64x2_t result;
@@ -5275,7 +6678,8 @@ vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
{
uint16x8_t result;
@@ -5286,7 +6690,8 @@ vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
{
uint32x4_t result;
@@ -5297,7 +6702,8 @@ vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
{
uint64x2_t result;
@@ -5308,7 +6714,8 @@ vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
{
int16x8_t result;
@@ -5319,7 +6726,8 @@ vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
{
int32x4_t result;
@@ -5330,7 +6738,8 @@ vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
{
int64x2_t result;
@@ -5341,7 +6750,8 @@ vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
{
uint16x8_t result;
@@ -5352,7 +6762,8 @@ vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
{
uint32x4_t result;
@@ -5363,7 +6774,8 @@ vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
{
uint64x2_t result;
@@ -5374,7 +6786,8 @@ vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
return result;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
{
int8x16_t result;
@@ -5385,7 +6798,8 @@ vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
{
int16x8_t result;
@@ -5396,7 +6810,8 @@ vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
{
int32x4_t result;
@@ -5407,7 +6822,8 @@ vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
{
uint8x16_t result;
@@ -5418,7 +6834,8 @@ vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
{
uint16x8_t result;
@@ -5429,7 +6846,8 @@ vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
{
uint32x4_t result;
@@ -5440,18 +6858,8 @@ vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vabd_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fabd %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_s8 (int8x8_t a, int8x8_t b)
{
int8x8_t result;
@@ -5462,7 +6870,8 @@ vabd_s8 (int8x8_t a, int8x8_t b)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_s16 (int16x4_t a, int16x4_t b)
{
int16x4_t result;
@@ -5473,7 +6882,8 @@ vabd_s16 (int16x4_t a, int16x4_t b)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_s32 (int32x2_t a, int32x2_t b)
{
int32x2_t result;
@@ -5484,7 +6894,8 @@ vabd_s32 (int32x2_t a, int32x2_t b)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_u8 (uint8x8_t a, uint8x8_t b)
{
uint8x8_t result;
@@ -5495,7 +6906,8 @@ vabd_u8 (uint8x8_t a, uint8x8_t b)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_u16 (uint16x4_t a, uint16x4_t b)
{
uint16x4_t result;
@@ -5506,7 +6918,8 @@ vabd_u16 (uint16x4_t a, uint16x4_t b)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_u32 (uint32x2_t a, uint32x2_t b)
{
uint32x2_t result;
@@ -5517,18 +6930,8 @@ vabd_u32 (uint32x2_t a, uint32x2_t b)
return result;
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vabdd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("fabd %d0, %d1, %d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_s8 (int8x16_t a, int8x16_t b)
{
int16x8_t result;
@@ -5539,7 +6942,8 @@ vabdl_high_s8 (int8x16_t a, int8x16_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_s16 (int16x8_t a, int16x8_t b)
{
int32x4_t result;
@@ -5550,7 +6954,8 @@ vabdl_high_s16 (int16x8_t a, int16x8_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_s32 (int32x4_t a, int32x4_t b)
{
int64x2_t result;
@@ -5561,7 +6966,8 @@ vabdl_high_s32 (int32x4_t a, int32x4_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_u8 (uint8x16_t a, uint8x16_t b)
{
uint16x8_t result;
@@ -5572,7 +6978,8 @@ vabdl_high_u8 (uint8x16_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_u16 (uint16x8_t a, uint16x8_t b)
{
uint32x4_t result;
@@ -5583,7 +6990,8 @@ vabdl_high_u16 (uint16x8_t a, uint16x8_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_u32 (uint32x4_t a, uint32x4_t b)
{
uint64x2_t result;
@@ -5594,7 +7002,8 @@ vabdl_high_u32 (uint32x4_t a, uint32x4_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s8 (int8x8_t a, int8x8_t b)
{
int16x8_t result;
@@ -5605,7 +7014,8 @@ vabdl_s8 (int8x8_t a, int8x8_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s16 (int16x4_t a, int16x4_t b)
{
int32x4_t result;
@@ -5616,7 +7026,8 @@ vabdl_s16 (int16x4_t a, int16x4_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s32 (int32x2_t a, int32x2_t b)
{
int64x2_t result;
@@ -5627,7 +7038,8 @@ vabdl_s32 (int32x2_t a, int32x2_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u8 (uint8x8_t a, uint8x8_t b)
{
uint16x8_t result;
@@ -5638,7 +7050,8 @@ vabdl_u8 (uint8x8_t a, uint8x8_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u16 (uint16x4_t a, uint16x4_t b)
{
uint32x4_t result;
@@ -5649,7 +7062,8 @@ vabdl_u16 (uint16x4_t a, uint16x4_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u32 (uint32x2_t a, uint32x2_t b)
{
uint64x2_t result;
@@ -5660,29 +7074,8 @@ vabdl_u32 (uint32x2_t a, uint32x2_t b)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vabdq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fabd %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vabdq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fabd %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s8 (int8x16_t a, int8x16_t b)
{
int8x16_t result;
@@ -5693,7 +7086,8 @@ vabdq_s8 (int8x16_t a, int8x16_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s16 (int16x8_t a, int16x8_t b)
{
int16x8_t result;
@@ -5704,7 +7098,8 @@ vabdq_s16 (int16x8_t a, int16x8_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s32 (int32x4_t a, int32x4_t b)
{
int32x4_t result;
@@ -5715,7 +7110,8 @@ vabdq_s32 (int32x4_t a, int32x4_t b)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u8 (uint8x16_t a, uint8x16_t b)
{
uint8x16_t result;
@@ -5726,7 +7122,8 @@ vabdq_u8 (uint8x16_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u16 (uint16x8_t a, uint16x8_t b)
{
uint16x8_t result;
@@ -5737,7 +7134,8 @@ vabdq_u16 (uint16x8_t a, uint16x8_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u32 (uint32x4_t a, uint32x4_t b)
{
uint32x4_t result;
@@ -5748,18 +7146,8 @@ vabdq_u32 (uint32x4_t a, uint32x4_t b)
return result;
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vabds_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("fabd %s0, %s1, %s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_s8 (int8x8_t a)
{
int16_t result;
@@ -5770,7 +7158,8 @@ vaddlv_s8 (int8x8_t a)
return result;
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_s16 (int16x4_t a)
{
int32_t result;
@@ -5781,7 +7170,8 @@ vaddlv_s16 (int16x4_t a)
return result;
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_u8 (uint8x8_t a)
{
uint16_t result;
@@ -5792,7 +7182,8 @@ vaddlv_u8 (uint8x8_t a)
return result;
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_u16 (uint16x4_t a)
{
uint32_t result;
@@ -5803,7 +7194,8 @@ vaddlv_u16 (uint16x4_t a)
return result;
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_s8 (int8x16_t a)
{
int16_t result;
@@ -5814,7 +7206,8 @@ vaddlvq_s8 (int8x16_t a)
return result;
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_s16 (int16x8_t a)
{
int32_t result;
@@ -5825,7 +7218,8 @@ vaddlvq_s16 (int16x8_t a)
return result;
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_s32 (int32x4_t a)
{
int64_t result;
@@ -5836,7 +7230,8 @@ vaddlvq_s32 (int32x4_t a)
return result;
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_u8 (uint8x16_t a)
{
uint16_t result;
@@ -5847,7 +7242,8 @@ vaddlvq_u8 (uint8x16_t a)
return result;
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_u16 (uint16x8_t a)
{
uint32_t result;
@@ -5858,7 +7254,8 @@ vaddlvq_u16 (uint16x8_t a)
return result;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_u32 (uint32x4_t a)
{
uint64_t result;
@@ -5869,403 +7266,8 @@ vaddlvq_u32 (uint32x4_t a)
return result;
}
-#define vcopyq_lane_f32(a, b, c, d) \
- __extension__ \
- ({ \
- float32x4_t c_ = (c); \
- float32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("ins %0.s[%2], %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_f64(a, b, c, d) \
- __extension__ \
- ({ \
- float64x2_t c_ = (c); \
- float64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("ins %0.d[%2], %3.d[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_p8(a, b, c, d) \
- __extension__ \
- ({ \
- poly8x16_t c_ = (c); \
- poly8x16_t a_ = (a); \
- poly8x16_t result; \
- __asm__ ("ins %0.b[%2], %3.b[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_p16(a, b, c, d) \
- __extension__ \
- ({ \
- poly16x8_t c_ = (c); \
- poly16x8_t a_ = (a); \
- poly16x8_t result; \
- __asm__ ("ins %0.h[%2], %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s8(a, b, c, d) \
- __extension__ \
- ({ \
- int8x16_t c_ = (c); \
- int8x16_t a_ = (a); \
- int8x16_t result; \
- __asm__ ("ins %0.b[%2], %3.b[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t a_ = (a); \
- int16x8_t result; \
- __asm__ ("ins %0.h[%2], %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("ins %0.s[%2], %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_s64(a, b, c, d) \
- __extension__ \
- ({ \
- int64x2_t c_ = (c); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("ins %0.d[%2], %3.d[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u8(a, b, c, d) \
- __extension__ \
- ({ \
- uint8x16_t c_ = (c); \
- uint8x16_t a_ = (a); \
- uint8x16_t result; \
- __asm__ ("ins %0.b[%2], %3.b[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t a_ = (a); \
- uint16x8_t result; \
- __asm__ ("ins %0.h[%2], %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("ins %0.s[%2], %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcopyq_lane_u64(a, b, c, d) \
- __extension__ \
- ({ \
- uint64x2_t c_ = (c); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("ins %0.d[%2], %3.d[%4]" \
- : "=w"(result) \
- : "0"(a_), "i"(b), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvt_n_f32_s32(a, b) \
- __extension__ \
- ({ \
- int32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("scvtf %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvt_n_f32_u32(a, b) \
- __extension__ \
- ({ \
- uint32x2_t a_ = (a); \
- float32x2_t result; \
- __asm__ ("ucvtf %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvt_n_s32_f32(a, b) \
- __extension__ \
- ({ \
- float32x2_t a_ = (a); \
- int32x2_t result; \
- __asm__ ("fcvtzs %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvt_n_u32_f32(a, b) \
- __extension__ \
- ({ \
- float32x2_t a_ = (a); \
- uint32x2_t result; \
- __asm__ ("fcvtzu %0.2s, %1.2s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtd_n_f64_s64(a, b) \
- __extension__ \
- ({ \
- int64_t a_ = (a); \
- float64_t result; \
- __asm__ ("scvtf %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtd_n_f64_u64(a, b) \
- __extension__ \
- ({ \
- uint64_t a_ = (a); \
- float64_t result; \
- __asm__ ("ucvtf %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtd_n_s64_f64(a, b) \
- __extension__ \
- ({ \
- float64_t a_ = (a); \
- int64_t result; \
- __asm__ ("fcvtzs %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtd_n_u64_f64(a, b) \
- __extension__ \
- ({ \
- float64_t a_ = (a); \
- uint64_t result; \
- __asm__ ("fcvtzu %d0,%d1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_f32_s32(a, b) \
- __extension__ \
- ({ \
- int32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("scvtf %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_f32_u32(a, b) \
- __extension__ \
- ({ \
- uint32x4_t a_ = (a); \
- float32x4_t result; \
- __asm__ ("ucvtf %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_f64_s64(a, b) \
- __extension__ \
- ({ \
- int64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("scvtf %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_f64_u64(a, b) \
- __extension__ \
- ({ \
- uint64x2_t a_ = (a); \
- float64x2_t result; \
- __asm__ ("ucvtf %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_s32_f32(a, b) \
- __extension__ \
- ({ \
- float32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("fcvtzs %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_s64_f64(a, b) \
- __extension__ \
- ({ \
- float64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("fcvtzs %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_u32_f32(a, b) \
- __extension__ \
- ({ \
- float32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("fcvtzu %0.4s, %1.4s, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvtq_n_u64_f64(a, b) \
- __extension__ \
- ({ \
- float64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("fcvtzu %0.2d, %1.2d, #%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvts_n_f32_s32(a, b) \
- __extension__ \
- ({ \
- int32_t a_ = (a); \
- float32_t result; \
- __asm__ ("scvtf %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvts_n_f32_u32(a, b) \
- __extension__ \
- ({ \
- uint32_t a_ = (a); \
- float32_t result; \
- __asm__ ("ucvtf %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvts_n_s32_f32(a, b) \
- __extension__ \
- ({ \
- float32_t a_ = (a); \
- int32_t result; \
- __asm__ ("fcvtzs %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-#define vcvts_n_u32_f32(a, b) \
- __extension__ \
- ({ \
- float32_t a_ = (a); \
- uint32_t result; \
- __asm__ ("fcvtzu %s0,%s1,%2" \
- : "=w"(result) \
- : "w"(a_), "i"(b) \
- : /* No clobbers */); \
- result; \
- })
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtx_f32_f64 (float64x2_t a)
{
float32x2_t result;
@@ -6276,7 +7278,8 @@ vcvtx_f32_f64 (float64x2_t a)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtx_high_f32_f64 (float32x2_t a, float64x2_t b)
{
float32x4_t result;
@@ -6287,7 +7290,8 @@ vcvtx_high_f32_f64 (float32x2_t a, float64x2_t b)
return result;
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtxd_f32_f64 (float64_t a)
{
float32_t result;
@@ -6298,7 +7302,8 @@ vcvtxd_f32_f64 (float64_t a)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
{
float32x2_t result;
@@ -6310,7 +7315,8 @@ vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
{
int16x4_t result;
@@ -6321,7 +7327,8 @@ vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
{
int32x2_t result;
@@ -6332,7 +7339,8 @@ vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
{
uint16x4_t result;
@@ -6343,7 +7351,8 @@ vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
{
uint32x2_t result;
@@ -6354,7 +7363,8 @@ vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
{
int8x8_t result;
@@ -6365,7 +7375,8 @@ vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
{
int16x4_t result;
@@ -6376,7 +7387,8 @@ vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
{
int32x2_t result;
@@ -6387,7 +7399,8 @@ vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
{
uint8x8_t result;
@@ -6398,7 +7411,8 @@ vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
{
uint16x4_t result;
@@ -6409,7 +7423,8 @@ vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
{
uint32x2_t result;
@@ -6532,7 +7547,8 @@ vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
result; \
})
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
{
int32x4_t result;
@@ -6543,7 +7559,8 @@ vmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
{
int64x2_t result;
@@ -6554,7 +7571,8 @@ vmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
{
uint32x4_t result;
@@ -6565,7 +7583,8 @@ vmlal_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
{
uint64x2_t result;
@@ -6576,7 +7595,8 @@ vmlal_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
{
int16x8_t result;
@@ -6587,7 +7607,8 @@ vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
{
int32x4_t result;
@@ -6598,7 +7619,8 @@ vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
{
int64x2_t result;
@@ -6609,7 +7631,8 @@ vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
{
uint16x8_t result;
@@ -6620,7 +7643,8 @@ vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
{
uint32x4_t result;
@@ -6631,7 +7655,8 @@ vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
{
uint64x2_t result;
@@ -6754,7 +7779,8 @@ vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
result; \
})
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
{
int32x4_t result;
@@ -6765,7 +7791,8 @@ vmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
{
int64x2_t result;
@@ -6776,7 +7803,8 @@ vmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
{
uint32x4_t result;
@@ -6787,7 +7815,8 @@ vmlal_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
{
uint64x2_t result;
@@ -6798,7 +7827,8 @@ vmlal_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
{
int16x8_t result;
@@ -6809,7 +7839,8 @@ vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
{
int32x4_t result;
@@ -6820,7 +7851,8 @@ vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
{
int64x2_t result;
@@ -6831,7 +7863,8 @@ vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
{
uint16x8_t result;
@@ -6842,7 +7875,8 @@ vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
{
uint32x4_t result;
@@ -6853,7 +7887,8 @@ vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
{
uint64x2_t result;
@@ -6864,7 +7899,8 @@ vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
{
float32x4_t result;
@@ -6876,7 +7912,8 @@ vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
{
int16x8_t result;
@@ -6887,7 +7924,8 @@ vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
{
int32x4_t result;
@@ -6898,7 +7936,8 @@ vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
{
uint16x8_t result;
@@ -6909,7 +7948,8 @@ vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
{
uint32x4_t result;
@@ -6920,7 +7960,8 @@ vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
return result;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
{
int8x16_t result;
@@ -6931,7 +7972,8 @@ vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
{
int16x8_t result;
@@ -6942,7 +7984,8 @@ vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
{
int32x4_t result;
@@ -6953,7 +7996,8 @@ vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
{
uint8x16_t result;
@@ -6964,7 +8008,8 @@ vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
{
uint16x8_t result;
@@ -6975,7 +8020,8 @@ vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
{
uint32x4_t result;
@@ -6986,7 +8032,8 @@ vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
{
float32x2_t result;
@@ -6998,7 +8045,8 @@ vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
{
int16x4_t result;
@@ -7009,7 +8057,8 @@ vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
{
int32x2_t result;
@@ -7020,7 +8069,8 @@ vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
{
uint16x4_t result;
@@ -7031,7 +8081,8 @@ vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
{
uint32x2_t result;
@@ -7042,7 +8093,8 @@ vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
{
int8x8_t result;
@@ -7053,7 +8105,8 @@ vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
{
int16x4_t result;
@@ -7064,7 +8117,8 @@ vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
{
int32x2_t result;
@@ -7075,7 +8129,8 @@ vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
{
uint8x8_t result;
@@ -7086,7 +8141,8 @@ vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
{
uint16x4_t result;
@@ -7097,7 +8153,8 @@ vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
{
uint32x2_t result;
@@ -7220,7 +8277,8 @@ vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
result; \
})
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
{
int32x4_t result;
@@ -7231,7 +8289,8 @@ vmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
{
int64x2_t result;
@@ -7242,7 +8301,8 @@ vmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
{
uint32x4_t result;
@@ -7253,7 +8313,8 @@ vmlsl_high_n_u16 (uint32x4_t a, uint16x8_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
{
uint64x2_t result;
@@ -7264,7 +8325,8 @@ vmlsl_high_n_u32 (uint64x2_t a, uint32x4_t b, uint32_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
{
int16x8_t result;
@@ -7275,7 +8337,8 @@ vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
{
int32x4_t result;
@@ -7286,7 +8349,8 @@ vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
{
int64x2_t result;
@@ -7297,7 +8361,8 @@ vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
{
uint16x8_t result;
@@ -7308,7 +8373,8 @@ vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
{
uint32x4_t result;
@@ -7319,7 +8385,8 @@ vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
{
uint64x2_t result;
@@ -7442,7 +8509,8 @@ vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
result; \
})
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
{
int32x4_t result;
@@ -7453,7 +8521,8 @@ vmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
{
int64x2_t result;
@@ -7464,7 +8533,8 @@ vmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
{
uint32x4_t result;
@@ -7475,7 +8545,8 @@ vmlsl_n_u16 (uint32x4_t a, uint16x4_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
{
uint64x2_t result;
@@ -7486,7 +8557,8 @@ vmlsl_n_u32 (uint64x2_t a, uint32x2_t b, uint32_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
{
int16x8_t result;
@@ -7497,7 +8569,8 @@ vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
{
int32x4_t result;
@@ -7508,7 +8581,8 @@ vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
{
int64x2_t result;
@@ -7519,7 +8593,8 @@ vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
{
uint16x8_t result;
@@ -7530,7 +8605,8 @@ vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
{
uint32x4_t result;
@@ -7541,7 +8617,8 @@ vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
{
uint64x2_t result;
@@ -7552,7 +8629,8 @@ vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
{
float32x4_t result;
@@ -7564,7 +8642,8 @@ vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
{
int16x8_t result;
@@ -7575,7 +8654,8 @@ vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
{
int32x4_t result;
@@ -7586,7 +8666,8 @@ vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
{
uint16x8_t result;
@@ -7597,7 +8678,8 @@ vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
{
uint32x4_t result;
@@ -7608,7 +8690,8 @@ vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
return result;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
{
int8x16_t result;
@@ -7619,7 +8702,8 @@ vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
{
int16x8_t result;
@@ -7630,7 +8714,8 @@ vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
{
int32x4_t result;
@@ -7641,7 +8726,8 @@ vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
{
uint8x16_t result;
@@ -7652,7 +8738,8 @@ vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
{
uint16x8_t result;
@@ -7663,7 +8750,8 @@ vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
{
uint32x4_t result;
@@ -7674,7 +8762,8 @@ vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_s8 (int8x16_t a)
{
int16x8_t result;
@@ -7685,7 +8774,8 @@ vmovl_high_s8 (int8x16_t a)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_s16 (int16x8_t a)
{
int32x4_t result;
@@ -7696,7 +8786,8 @@ vmovl_high_s16 (int16x8_t a)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_s32 (int32x4_t a)
{
int64x2_t result;
@@ -7707,7 +8798,8 @@ vmovl_high_s32 (int32x4_t a)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_u8 (uint8x16_t a)
{
uint16x8_t result;
@@ -7718,7 +8810,8 @@ vmovl_high_u8 (uint8x16_t a)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_u16 (uint16x8_t a)
{
uint32x4_t result;
@@ -7729,7 +8822,8 @@ vmovl_high_u16 (uint16x8_t a)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_u32 (uint32x4_t a)
{
uint64x2_t result;
@@ -7740,7 +8834,8 @@ vmovl_high_u32 (uint32x4_t a)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s8 (int8x8_t a)
{
int16x8_t result;
@@ -7751,7 +8846,8 @@ vmovl_s8 (int8x8_t a)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s16 (int16x4_t a)
{
int32x4_t result;
@@ -7762,7 +8858,8 @@ vmovl_s16 (int16x4_t a)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s32 (int32x2_t a)
{
int64x2_t result;
@@ -7773,7 +8870,8 @@ vmovl_s32 (int32x2_t a)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u8 (uint8x8_t a)
{
uint16x8_t result;
@@ -7784,7 +8882,8 @@ vmovl_u8 (uint8x8_t a)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u16 (uint16x4_t a)
{
uint32x4_t result;
@@ -7795,7 +8894,8 @@ vmovl_u16 (uint16x4_t a)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u32 (uint32x2_t a)
{
uint64x2_t result;
@@ -7806,7 +8906,8 @@ vmovl_u32 (uint32x2_t a)
return result;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s16 (int8x8_t a, int16x8_t b)
{
int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
@@ -7817,7 +8918,8 @@ vmovn_high_s16 (int8x8_t a, int16x8_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s32 (int16x4_t a, int32x4_t b)
{
int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
@@ -7828,7 +8930,8 @@ vmovn_high_s32 (int16x4_t a, int32x4_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s64 (int32x2_t a, int64x2_t b)
{
int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
@@ -7839,7 +8942,8 @@ vmovn_high_s64 (int32x2_t a, int64x2_t b)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
{
uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
@@ -7850,7 +8954,8 @@ vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
{
uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
@@ -7861,7 +8966,8 @@ vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
{
uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
@@ -7872,7 +8978,8 @@ vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_s16 (int16x8_t a)
{
int8x8_t result;
@@ -7883,7 +8990,8 @@ vmovn_s16 (int16x8_t a)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_s32 (int32x4_t a)
{
int16x4_t result;
@@ -7894,7 +9002,8 @@ vmovn_s32 (int32x4_t a)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_s64 (int64x2_t a)
{
int32x2_t result;
@@ -7905,7 +9014,8 @@ vmovn_s64 (int64x2_t a)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_u16 (uint16x8_t a)
{
uint8x8_t result;
@@ -7916,7 +9026,8 @@ vmovn_u16 (uint16x8_t a)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_u32 (uint32x4_t a)
{
uint16x4_t result;
@@ -7927,7 +9038,8 @@ vmovn_u32 (uint32x4_t a)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_u64 (uint64x2_t a)
{
uint32x2_t result;
@@ -7938,61 +9050,6 @@ vmovn_u64 (uint64x2_t a)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmul_n_f32 (float32x2_t a, float32_t b)
-{
- float32x2_t result;
- __asm__ ("fmul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmul_n_s16 (int16x4_t a, int16_t b)
-{
- int16x4_t result;
- __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmul_n_s32 (int32x2_t a, int32_t b)
-{
- int32x2_t result;
- __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmul_n_u16 (uint16x4_t a, uint16_t b)
-{
- uint16x4_t result;
- __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmul_n_u32 (uint32x2_t a, uint32_t b)
-{
- uint32x2_t result;
- __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
#define vmull_high_lane_s16(a, b, c) \
__extension__ \
({ \
@@ -8097,7 +9154,8 @@ vmul_n_u32 (uint32x2_t a, uint32_t b)
result; \
})
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_s16 (int16x8_t a, int16_t b)
{
int32x4_t result;
@@ -8108,7 +9166,8 @@ vmull_high_n_s16 (int16x8_t a, int16_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_s32 (int32x4_t a, int32_t b)
{
int64x2_t result;
@@ -8119,7 +9178,8 @@ vmull_high_n_s32 (int32x4_t a, int32_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_u16 (uint16x8_t a, uint16_t b)
{
uint32x4_t result;
@@ -8130,7 +9190,8 @@ vmull_high_n_u16 (uint16x8_t a, uint16_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_u32 (uint32x4_t a, uint32_t b)
{
uint64x2_t result;
@@ -8141,7 +9202,8 @@ vmull_high_n_u32 (uint32x4_t a, uint32_t b)
return result;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_p8 (poly8x16_t a, poly8x16_t b)
{
poly16x8_t result;
@@ -8152,7 +9214,8 @@ vmull_high_p8 (poly8x16_t a, poly8x16_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_s8 (int8x16_t a, int8x16_t b)
{
int16x8_t result;
@@ -8163,7 +9226,8 @@ vmull_high_s8 (int8x16_t a, int8x16_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_s16 (int16x8_t a, int16x8_t b)
{
int32x4_t result;
@@ -8174,7 +9238,8 @@ vmull_high_s16 (int16x8_t a, int16x8_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_s32 (int32x4_t a, int32x4_t b)
{
int64x2_t result;
@@ -8185,7 +9250,8 @@ vmull_high_s32 (int32x4_t a, int32x4_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_u8 (uint8x16_t a, uint8x16_t b)
{
uint16x8_t result;
@@ -8196,7 +9262,8 @@ vmull_high_u8 (uint8x16_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_u16 (uint16x8_t a, uint16x8_t b)
{
uint32x4_t result;
@@ -8207,7 +9274,8 @@ vmull_high_u16 (uint16x8_t a, uint16x8_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_u32 (uint32x4_t a, uint32x4_t b)
{
uint64x2_t result;
@@ -8322,7 +9390,8 @@ vmull_high_u32 (uint32x4_t a, uint32x4_t b)
result; \
})
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_s16 (int16x4_t a, int16_t b)
{
int32x4_t result;
@@ -8333,7 +9402,8 @@ vmull_n_s16 (int16x4_t a, int16_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_s32 (int32x2_t a, int32_t b)
{
int64x2_t result;
@@ -8344,7 +9414,8 @@ vmull_n_s32 (int32x2_t a, int32_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_u16 (uint16x4_t a, uint16_t b)
{
uint32x4_t result;
@@ -8355,7 +9426,8 @@ vmull_n_u16 (uint16x4_t a, uint16_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_u32 (uint32x2_t a, uint32_t b)
{
uint64x2_t result;
@@ -8366,7 +9438,8 @@ vmull_n_u32 (uint32x2_t a, uint32_t b)
return result;
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_p8 (poly8x8_t a, poly8x8_t b)
{
poly16x8_t result;
@@ -8377,7 +9450,8 @@ vmull_p8 (poly8x8_t a, poly8x8_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_s8 (int8x8_t a, int8x8_t b)
{
int16x8_t result;
@@ -8388,7 +9462,8 @@ vmull_s8 (int8x8_t a, int8x8_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_s16 (int16x4_t a, int16x4_t b)
{
int32x4_t result;
@@ -8399,7 +9474,8 @@ vmull_s16 (int16x4_t a, int16x4_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_s32 (int32x2_t a, int32x2_t b)
{
int64x2_t result;
@@ -8410,7 +9486,8 @@ vmull_s32 (int32x2_t a, int32x2_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_u8 (uint8x8_t a, uint8x8_t b)
{
uint16x8_t result;
@@ -8421,7 +9498,8 @@ vmull_u8 (uint8x8_t a, uint8x8_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_u16 (uint16x4_t a, uint16x4_t b)
{
uint32x4_t result;
@@ -8432,7 +9510,8 @@ vmull_u16 (uint16x4_t a, uint16x4_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_u32 (uint32x2_t a, uint32x2_t b)
{
uint64x2_t result;
@@ -8443,228 +9522,8 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmulq_n_f32 (float32x4_t a, float32_t b)
-{
- float32x4_t result;
- __asm__ ("fmul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmulq_n_f64 (float64x2_t a, float64_t b)
-{
- float64x2_t result;
- __asm__ ("fmul %0.2d,%1.2d,%2.d[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmulq_n_s16 (int16x8_t a, int16_t b)
-{
- int16x8_t result;
- __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmulq_n_s32 (int32x4_t a, int32_t b)
-{
- int32x4_t result;
- __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmulq_n_u16 (uint16x8_t a, uint16_t b)
-{
- uint16x8_t result;
- __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
- : "=w"(result)
- : "w"(a), "x"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmulq_n_u32 (uint32x4_t a, uint32_t b)
-{
- uint32x4_t result;
- __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vmvn_p8 (poly8x8_t a)
-{
- poly8x8_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmvn_s8 (int8x8_t a)
-{
- int8x8_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmvn_s16 (int16x4_t a)
-{
- int16x4_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmvn_s32 (int32x2_t a)
-{
- int32x2_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmvn_u8 (uint8x8_t a)
-{
- uint8x8_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmvn_u16 (uint16x4_t a)
-{
- uint16x4_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmvn_u32 (uint32x2_t a)
-{
- uint32x2_t result;
- __asm__ ("mvn %0.8b,%1.8b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vmvnq_p8 (poly8x16_t a)
-{
- poly8x16_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmvnq_s8 (int8x16_t a)
-{
- int8x16_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmvnq_s16 (int16x8_t a)
-{
- int16x8_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmvnq_s32 (int32x4_t a)
-{
- int32x4_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmvnq_u8 (uint8x16_t a)
-{
- uint8x16_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmvnq_u16 (uint16x8_t a)
-{
- uint16x8_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmvnq_u32 (uint32x4_t a)
-{
- uint32x4_t result;
- __asm__ ("mvn %0.16b,%1.16b"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s8 (int16x4_t a, int8x8_t b)
{
int16x4_t result;
@@ -8675,7 +9534,8 @@ vpadal_s8 (int16x4_t a, int8x8_t b)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s16 (int32x2_t a, int16x4_t b)
{
int32x2_t result;
@@ -8686,7 +9546,8 @@ vpadal_s16 (int32x2_t a, int16x4_t b)
return result;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s32 (int64x1_t a, int32x2_t b)
{
int64x1_t result;
@@ -8697,7 +9558,8 @@ vpadal_s32 (int64x1_t a, int32x2_t b)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u8 (uint16x4_t a, uint8x8_t b)
{
uint16x4_t result;
@@ -8708,7 +9570,8 @@ vpadal_u8 (uint16x4_t a, uint8x8_t b)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u16 (uint32x2_t a, uint16x4_t b)
{
uint32x2_t result;
@@ -8719,7 +9582,8 @@ vpadal_u16 (uint32x2_t a, uint16x4_t b)
return result;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u32 (uint64x1_t a, uint32x2_t b)
{
uint64x1_t result;
@@ -8730,7 +9594,8 @@ vpadal_u32 (uint64x1_t a, uint32x2_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s8 (int16x8_t a, int8x16_t b)
{
int16x8_t result;
@@ -8741,7 +9606,8 @@ vpadalq_s8 (int16x8_t a, int8x16_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s16 (int32x4_t a, int16x8_t b)
{
int32x4_t result;
@@ -8752,7 +9618,8 @@ vpadalq_s16 (int32x4_t a, int16x8_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s32 (int64x2_t a, int32x4_t b)
{
int64x2_t result;
@@ -8763,7 +9630,8 @@ vpadalq_s32 (int64x2_t a, int32x4_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u8 (uint16x8_t a, uint8x16_t b)
{
uint16x8_t result;
@@ -8774,7 +9642,8 @@ vpadalq_u8 (uint16x8_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u16 (uint32x4_t a, uint16x8_t b)
{
uint32x4_t result;
@@ -8785,7 +9654,8 @@ vpadalq_u16 (uint32x4_t a, uint16x8_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u32 (uint64x2_t a, uint32x4_t b)
{
uint64x2_t result;
@@ -8796,18 +9666,8 @@ vpadalq_u32 (uint64x2_t a, uint32x4_t b)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpadd_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("faddp %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s8 (int8x8_t a)
{
int16x4_t result;
@@ -8818,7 +9678,8 @@ vpaddl_s8 (int8x8_t a)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s16 (int16x4_t a)
{
int32x2_t result;
@@ -8829,7 +9690,8 @@ vpaddl_s16 (int16x4_t a)
return result;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s32 (int32x2_t a)
{
int64x1_t result;
@@ -8840,7 +9702,8 @@ vpaddl_s32 (int32x2_t a)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u8 (uint8x8_t a)
{
uint16x4_t result;
@@ -8851,7 +9714,8 @@ vpaddl_u8 (uint8x8_t a)
return result;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u16 (uint16x4_t a)
{
uint32x2_t result;
@@ -8862,7 +9726,8 @@ vpaddl_u16 (uint16x4_t a)
return result;
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u32 (uint32x2_t a)
{
uint64x1_t result;
@@ -8873,7 +9738,8 @@ vpaddl_u32 (uint32x2_t a)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s8 (int8x16_t a)
{
int16x8_t result;
@@ -8884,7 +9750,8 @@ vpaddlq_s8 (int8x16_t a)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s16 (int16x8_t a)
{
int32x4_t result;
@@ -8895,7 +9762,8 @@ vpaddlq_s16 (int16x8_t a)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s32 (int32x4_t a)
{
int64x2_t result;
@@ -8906,7 +9774,8 @@ vpaddlq_s32 (int32x4_t a)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u8 (uint8x16_t a)
{
uint16x8_t result;
@@ -8917,7 +9786,8 @@ vpaddlq_u8 (uint8x16_t a)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u16 (uint16x8_t a)
{
uint32x4_t result;
@@ -8928,7 +9798,8 @@ vpaddlq_u16 (uint16x8_t a)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u32 (uint32x4_t a)
{
uint64x2_t result;
@@ -8939,29 +9810,8 @@ vpaddlq_u32 (uint32x4_t a)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpaddq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("faddp %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpaddq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("faddp %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s8 (int8x16_t a, int8x16_t b)
{
int8x16_t result;
@@ -8972,7 +9822,8 @@ vpaddq_s8 (int8x16_t a, int8x16_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s16 (int16x8_t a, int16x8_t b)
{
int16x8_t result;
@@ -8983,7 +9834,8 @@ vpaddq_s16 (int16x8_t a, int16x8_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s32 (int32x4_t a, int32x4_t b)
{
int32x4_t result;
@@ -8994,7 +9846,8 @@ vpaddq_s32 (int32x4_t a, int32x4_t b)
return result;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s64 (int64x2_t a, int64x2_t b)
{
int64x2_t result;
@@ -9005,7 +9858,8 @@ vpaddq_s64 (int64x2_t a, int64x2_t b)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u8 (uint8x16_t a, uint8x16_t b)
{
uint8x16_t result;
@@ -9016,7 +9870,8 @@ vpaddq_u8 (uint8x16_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u16 (uint16x8_t a, uint16x8_t b)
{
uint16x8_t result;
@@ -9027,7 +9882,8 @@ vpaddq_u16 (uint16x8_t a, uint16x8_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u32 (uint32x4_t a, uint32x4_t b)
{
uint32x4_t result;
@@ -9038,7 +9894,8 @@ vpaddq_u32 (uint32x4_t a, uint32x4_t b)
return result;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u64 (uint64x2_t a, uint64x2_t b)
{
uint64x2_t result;
@@ -9049,18 +9906,8 @@ vpaddq_u64 (uint64x2_t a, uint64x2_t b)
return result;
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vpadds_f32 (float32x2_t a)
-{
- float32_t result;
- __asm__ ("faddp %s0,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_n_s16 (int16x4_t a, int16_t b)
{
int16x4_t result;
@@ -9071,7 +9918,8 @@ vqdmulh_n_s16 (int16x4_t a, int16_t b)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_n_s32 (int32x2_t a, int32_t b)
{
int32x2_t result;
@@ -9082,7 +9930,8 @@ vqdmulh_n_s32 (int32x2_t a, int32_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_n_s16 (int16x8_t a, int16_t b)
{
int16x8_t result;
@@ -9093,7 +9942,8 @@ vqdmulhq_n_s16 (int16x8_t a, int16_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_n_s32 (int32x4_t a, int32_t b)
{
int32x4_t result;
@@ -9104,7 +9954,8 @@ vqdmulhq_n_s32 (int32x4_t a, int32_t b)
return result;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_s16 (int8x8_t a, int16x8_t b)
{
int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
@@ -9115,7 +9966,8 @@ vqmovn_high_s16 (int8x8_t a, int16x8_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_s32 (int16x4_t a, int32x4_t b)
{
int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
@@ -9126,7 +9978,8 @@ vqmovn_high_s32 (int16x4_t a, int32x4_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_s64 (int32x2_t a, int64x2_t b)
{
int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
@@ -9137,7 +9990,8 @@ vqmovn_high_s64 (int32x2_t a, int64x2_t b)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
{
uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
@@ -9148,7 +10002,8 @@ vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
{
uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
@@ -9159,7 +10014,8 @@ vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
{
uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
@@ -9170,7 +10026,8 @@ vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
{
uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
@@ -9181,7 +10038,8 @@ vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
{
uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
@@ -9192,7 +10050,8 @@ vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
return result;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
{
uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
@@ -9203,7 +10062,8 @@ vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_n_s16 (int16x4_t a, int16_t b)
{
int16x4_t result;
@@ -9214,7 +10074,8 @@ vqrdmulh_n_s16 (int16x4_t a, int16_t b)
return result;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_n_s32 (int32x2_t a, int32_t b)
{
int32x2_t result;
@@ -9225,7 +10086,8 @@ vqrdmulh_n_s32 (int32x2_t a, int32_t b)
return result;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
{
int16x8_t result;
@@ -9236,7 +10098,8 @@ vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
return result;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
{
int32x4_t result;
@@ -9679,29 +10542,8 @@ vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
result; \
})
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrsqrte_f32 (float32x2_t a)
-{
- float32x2_t result;
- __asm__ ("frsqrte %0.2s,%1.2s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vrsqrte_f64 (float64x1_t a)
-{
- float64x1_t result;
- __asm__ ("frsqrte %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsqrte_u32 (uint32x2_t a)
{
uint32x2_t result;
@@ -9712,40 +10554,8 @@ vrsqrte_u32 (uint32x2_t a)
return result;
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vrsqrted_f64 (float64_t a)
-{
- float64_t result;
- __asm__ ("frsqrte %d0,%d1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrsqrteq_f32 (float32x4_t a)
-{
- float32x4_t result;
- __asm__ ("frsqrte %0.4s,%1.4s"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrsqrteq_f64 (float64x2_t a)
-{
- float64x2_t result;
- __asm__ ("frsqrte %0.2d,%1.2d"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsqrteq_u32 (uint32x4_t a)
{
uint32x4_t result;
@@ -9756,72 +10566,6 @@ vrsqrteq_u32 (uint32x4_t a)
return result;
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vrsqrtes_f32 (float32_t a)
-{
- float32_t result;
- __asm__ ("frsqrte %s0,%s1"
- : "=w"(result)
- : "w"(a)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vrsqrts_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("frsqrts %0.2s,%1.2s,%2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vrsqrtsd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("frsqrts %d0,%d1,%d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vrsqrtsq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("frsqrts %0.4s,%1.4s,%2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("frsqrts %0.2d,%1.2d,%2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vrsqrtss_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("frsqrts %s0,%s1,%s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
#define vshrn_high_n_s16(a, b, c) \
__extension__ \
({ \
@@ -10062,6 +10806,19 @@ vrsqrtss_f32 (float32_t a, float32_t b)
result; \
})
+#define vsri_n_p64(a, b, c) \
+ __extension__ \
+ ({ \
+ poly64x1_t b_ = (b); \
+ poly64x1_t a_ = (a); \
+ poly64x1_t result; \
+ __asm__ ("sri %d0,%d2,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers. */); \
+ result; \
+ })
+
#define vsriq_n_p8(a, b, c) \
__extension__ \
({ \
@@ -10088,7 +10845,21 @@ vrsqrtss_f32 (float32_t a, float32_t b)
result; \
})
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+#define vsriq_n_p64(a, b, c) \
+ __extension__ \
+ ({ \
+ poly64x2_t b_ = (b); \
+ poly64x2_t a_ = (a); \
+ poly64x2_t result; \
+ __asm__ ("sri %0.2d,%2.2d,%3" \
+ : "=w"(result) \
+ : "0"(a_), "w"(b_), "i"(c) \
+ : /* No clobbers. */); \
+ result; \
+ })
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_p8 (poly8x8_t a, poly8x8_t b)
{
uint8x8_t result;
@@ -10099,7 +10870,8 @@ vtst_p8 (poly8x8_t a, poly8x8_t b)
return result;
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_p16 (poly16x4_t a, poly16x4_t b)
{
uint16x4_t result;
@@ -10110,7 +10882,8 @@ vtst_p16 (poly16x4_t a, poly16x4_t b)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_p8 (poly8x16_t a, poly8x16_t b)
{
uint8x16_t result;
@@ -10121,7 +10894,8 @@ vtstq_p8 (poly8x16_t a, poly8x16_t b)
return result;
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_p16 (poly16x8_t a, poly16x8_t b)
{
uint16x8_t result;
@@ -10220,8 +10994,8 @@ __STRUCTN (float, 64, 4)
#define __ST2_LANE_FUNC(intype, largetype, ptrtype, mode, \
qmode, ptr_mode, funcsuffix, signedtype) \
-__extension__ static __inline void \
-__attribute__ ((__always_inline__)) \
+__extension__ extern __inline void \
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
vst2_lane_ ## funcsuffix (ptrtype *__ptr, \
intype __b, const int __c) \
{ \
@@ -10251,6 +11025,8 @@ __ST2_LANE_FUNC (poly8x8x2_t, poly8x16x2_t, poly8_t, v8qi, v16qi, qi, p8,
int8x16_t)
__ST2_LANE_FUNC (poly16x4x2_t, poly16x8x2_t, poly16_t, v4hi, v8hi, hi, p16,
int16x8_t)
+__ST2_LANE_FUNC (poly64x1x2_t, poly64x2x2_t, poly64_t, di, v2di_ssps, di, p64,
+ poly64x2_t)
__ST2_LANE_FUNC (int8x8x2_t, int8x16x2_t, int8_t, v8qi, v16qi, qi, s8,
int8x16_t)
__ST2_LANE_FUNC (int16x4x2_t, int16x8x2_t, int16_t, v4hi, v8hi, hi, s16,
@@ -10270,8 +11046,8 @@ __ST2_LANE_FUNC (uint64x1x2_t, uint64x2x2_t, uint64_t, di, v2di, di, u64,
#undef __ST2_LANE_FUNC
#define __ST2_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
-__extension__ static __inline void \
-__attribute__ ((__always_inline__)) \
+__extension__ extern __inline void \
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
vst2q_lane_ ## funcsuffix (ptrtype *__ptr, \
intype __b, const int __c) \
{ \
@@ -10286,6 +11062,7 @@ __ST2_LANE_FUNC (float32x4x2_t, float32_t, v4sf, sf, f32)
__ST2_LANE_FUNC (float64x2x2_t, float64_t, v2df, df, f64)
__ST2_LANE_FUNC (poly8x16x2_t, poly8_t, v16qi, qi, p8)
__ST2_LANE_FUNC (poly16x8x2_t, poly16_t, v8hi, hi, p16)
+__ST2_LANE_FUNC (poly64x2x2_t, poly64_t, v2di, di, p64)
__ST2_LANE_FUNC (int8x16x2_t, int8_t, v16qi, qi, s8)
__ST2_LANE_FUNC (int16x8x2_t, int16_t, v8hi, hi, s16)
__ST2_LANE_FUNC (int32x4x2_t, int32_t, v4si, si, s32)
@@ -10297,8 +11074,8 @@ __ST2_LANE_FUNC (uint64x2x2_t, uint64_t, v2di, di, u64)
#define __ST3_LANE_FUNC(intype, largetype, ptrtype, mode, \
qmode, ptr_mode, funcsuffix, signedtype) \
-__extension__ static __inline void \
-__attribute__ ((__always_inline__)) \
+__extension__ extern __inline void \
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
vst3_lane_ ## funcsuffix (ptrtype *__ptr, \
intype __b, const int __c) \
{ \
@@ -10333,6 +11110,8 @@ __ST3_LANE_FUNC (poly8x8x3_t, poly8x16x3_t, poly8_t, v8qi, v16qi, qi, p8,
int8x16_t)
__ST3_LANE_FUNC (poly16x4x3_t, poly16x8x3_t, poly16_t, v4hi, v8hi, hi, p16,
int16x8_t)
+__ST3_LANE_FUNC (poly64x1x3_t, poly64x2x3_t, poly64_t, di, v2di_ssps, di, p64,
+ poly64x2_t)
__ST3_LANE_FUNC (int8x8x3_t, int8x16x3_t, int8_t, v8qi, v16qi, qi, s8,
int8x16_t)
__ST3_LANE_FUNC (int16x4x3_t, int16x8x3_t, int16_t, v4hi, v8hi, hi, s16,
@@ -10352,8 +11131,8 @@ __ST3_LANE_FUNC (uint64x1x3_t, uint64x2x3_t, uint64_t, di, v2di, di, u64,
#undef __ST3_LANE_FUNC
#define __ST3_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
-__extension__ static __inline void \
-__attribute__ ((__always_inline__)) \
+__extension__ extern __inline void \
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
vst3q_lane_ ## funcsuffix (ptrtype *__ptr, \
intype __b, const int __c) \
{ \
@@ -10368,6 +11147,7 @@ __ST3_LANE_FUNC (float32x4x3_t, float32_t, v4sf, sf, f32)
__ST3_LANE_FUNC (float64x2x3_t, float64_t, v2df, df, f64)
__ST3_LANE_FUNC (poly8x16x3_t, poly8_t, v16qi, qi, p8)
__ST3_LANE_FUNC (poly16x8x3_t, poly16_t, v8hi, hi, p16)
+__ST3_LANE_FUNC (poly64x2x3_t, poly64_t, v2di, di, p64)
__ST3_LANE_FUNC (int8x16x3_t, int8_t, v16qi, qi, s8)
__ST3_LANE_FUNC (int16x8x3_t, int16_t, v8hi, hi, s16)
__ST3_LANE_FUNC (int32x4x3_t, int32_t, v4si, si, s32)
@@ -10379,8 +11159,8 @@ __ST3_LANE_FUNC (uint64x2x3_t, uint64_t, v2di, di, u64)
#define __ST4_LANE_FUNC(intype, largetype, ptrtype, mode, \
qmode, ptr_mode, funcsuffix, signedtype) \
-__extension__ static __inline void \
-__attribute__ ((__always_inline__)) \
+__extension__ extern __inline void \
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
vst4_lane_ ## funcsuffix (ptrtype *__ptr, \
intype __b, const int __c) \
{ \
@@ -10420,6 +11200,8 @@ __ST4_LANE_FUNC (poly8x8x4_t, poly8x16x4_t, poly8_t, v8qi, v16qi, qi, p8,
int8x16_t)
__ST4_LANE_FUNC (poly16x4x4_t, poly16x8x4_t, poly16_t, v4hi, v8hi, hi, p16,
int16x8_t)
+__ST4_LANE_FUNC (poly64x1x4_t, poly64x2x4_t, poly64_t, di, v2di_ssps, di, p64,
+ poly64x2_t)
__ST4_LANE_FUNC (int8x8x4_t, int8x16x4_t, int8_t, v8qi, v16qi, qi, s8,
int8x16_t)
__ST4_LANE_FUNC (int16x4x4_t, int16x8x4_t, int16_t, v4hi, v8hi, hi, s16,
@@ -10439,8 +11221,8 @@ __ST4_LANE_FUNC (uint64x1x4_t, uint64x2x4_t, uint64_t, di, v2di, di, u64,
#undef __ST4_LANE_FUNC
#define __ST4_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
-__extension__ static __inline void \
-__attribute__ ((__always_inline__)) \
+__extension__ extern __inline void \
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
vst4q_lane_ ## funcsuffix (ptrtype *__ptr, \
intype __b, const int __c) \
{ \
@@ -10455,6 +11237,7 @@ __ST4_LANE_FUNC (float32x4x4_t, float32_t, v4sf, sf, f32)
__ST4_LANE_FUNC (float64x2x4_t, float64_t, v2df, df, f64)
__ST4_LANE_FUNC (poly8x16x4_t, poly8_t, v16qi, qi, p8)
__ST4_LANE_FUNC (poly16x8x4_t, poly16_t, v8hi, hi, p16)
+__ST4_LANE_FUNC (poly64x2x4_t, poly64_t, v2di, di, p64)
__ST4_LANE_FUNC (int8x16x4_t, int8_t, v16qi, qi, s8)
__ST4_LANE_FUNC (int16x8x4_t, int16_t, v8hi, hi, s16)
__ST4_LANE_FUNC (int32x4x4_t, int32_t, v4si, si, s32)
@@ -10464,7 +11247,8 @@ __ST4_LANE_FUNC (uint16x8x4_t, uint16_t, v8hi, hi, u16)
__ST4_LANE_FUNC (uint32x4x4_t, uint32_t, v4si, si, u32)
__ST4_LANE_FUNC (uint64x2x4_t, uint64_t, v2di, di, u64)
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_s32 (int32x2_t a)
{
int64_t result;
@@ -10472,7 +11256,8 @@ vaddlv_s32 (int32x2_t a)
return result;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_u32 (uint32x2_t a)
{
uint64_t result;
@@ -10480,49 +11265,57 @@ vaddlv_u32 (uint32x2_t a)
return result;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_laneqv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_laneqv2si (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_laneqv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_laneqv4si (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_laneqv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_laneqv2si (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_laneqv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_laneqv4si (__a, __b, __c);
@@ -10530,7 +11323,8 @@ vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
/* Table intrinsics. */
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_p8 (poly8x16_t a, uint8x8_t b)
{
poly8x8_t result;
@@ -10541,7 +11335,8 @@ vqtbl1_p8 (poly8x16_t a, uint8x8_t b)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_s8 (int8x16_t a, uint8x8_t b)
{
int8x8_t result;
@@ -10552,7 +11347,8 @@ vqtbl1_s8 (int8x16_t a, uint8x8_t b)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_u8 (uint8x16_t a, uint8x8_t b)
{
uint8x8_t result;
@@ -10563,7 +11359,8 @@ vqtbl1_u8 (uint8x16_t a, uint8x8_t b)
return result;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_p8 (poly8x16_t a, uint8x16_t b)
{
poly8x16_t result;
@@ -10574,7 +11371,8 @@ vqtbl1q_p8 (poly8x16_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_s8 (int8x16_t a, uint8x16_t b)
{
int8x16_t result;
@@ -10585,7 +11383,8 @@ vqtbl1q_s8 (int8x16_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_u8 (uint8x16_t a, uint8x16_t b)
{
uint8x16_t result;
@@ -10596,7 +11395,8 @@ vqtbl1q_u8 (uint8x16_t a, uint8x16_t b)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_s8 (int8x8_t r, int8x16_t tab, uint8x8_t idx)
{
int8x8_t result = r;
@@ -10607,7 +11407,8 @@ vqtbx1_s8 (int8x8_t r, int8x16_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_u8 (uint8x8_t r, uint8x16_t tab, uint8x8_t idx)
{
uint8x8_t result = r;
@@ -10618,7 +11419,8 @@ vqtbx1_u8 (uint8x8_t r, uint8x16_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_p8 (poly8x8_t r, poly8x16_t tab, uint8x8_t idx)
{
poly8x8_t result = r;
@@ -10629,7 +11431,8 @@ vqtbx1_p8 (poly8x8_t r, poly8x16_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_s8 (int8x16_t r, int8x16_t tab, uint8x16_t idx)
{
int8x16_t result = r;
@@ -10640,7 +11443,8 @@ vqtbx1q_s8 (int8x16_t r, int8x16_t tab, uint8x16_t idx)
return result;
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_u8 (uint8x16_t r, uint8x16_t tab, uint8x16_t idx)
{
uint8x16_t result = r;
@@ -10651,7 +11455,8 @@ vqtbx1q_u8 (uint8x16_t r, uint8x16_t tab, uint8x16_t idx)
return result;
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_p8 (poly8x16_t r, poly8x16_t tab, uint8x16_t idx)
{
poly8x16_t result = r;
@@ -10664,7 +11469,8 @@ vqtbx1q_p8 (poly8x16_t r, poly8x16_t tab, uint8x16_t idx)
/* V7 legacy table intrinsics. */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_s8 (int8x8_t tab, int8x8_t idx)
{
int8x8_t result;
@@ -10676,7 +11482,8 @@ vtbl1_s8 (int8x8_t tab, int8x8_t idx)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
{
uint8x8_t result;
@@ -10688,7 +11495,8 @@ vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
{
poly8x8_t result;
@@ -10700,7 +11508,8 @@ vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_s8 (int8x8x2_t tab, int8x8_t idx)
{
int8x8_t result;
@@ -10712,7 +11521,8 @@ vtbl2_s8 (int8x8x2_t tab, int8x8_t idx)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_u8 (uint8x8x2_t tab, uint8x8_t idx)
{
uint8x8_t result;
@@ -10724,7 +11534,8 @@ vtbl2_u8 (uint8x8x2_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_p8 (poly8x8x2_t tab, uint8x8_t idx)
{
poly8x8_t result;
@@ -10736,7 +11547,8 @@ vtbl2_p8 (poly8x8x2_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_s8 (int8x8x3_t tab, int8x8_t idx)
{
int8x8_t result;
@@ -10752,7 +11564,8 @@ vtbl3_s8 (int8x8x3_t tab, int8x8_t idx)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx)
{
uint8x8_t result;
@@ -10768,7 +11581,8 @@ vtbl3_u8 (uint8x8x3_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx)
{
poly8x8_t result;
@@ -10784,7 +11598,8 @@ vtbl3_p8 (poly8x8x3_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_s8 (int8x8x4_t tab, int8x8_t idx)
{
int8x8_t result;
@@ -10800,7 +11615,8 @@ vtbl4_s8 (int8x8x4_t tab, int8x8_t idx)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_u8 (uint8x8x4_t tab, uint8x8_t idx)
{
uint8x8_t result;
@@ -10816,7 +11632,8 @@ vtbl4_u8 (uint8x8x4_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_p8 (poly8x8x4_t tab, uint8x8_t idx)
{
poly8x8_t result;
@@ -10832,7 +11649,8 @@ vtbl4_p8 (poly8x8x4_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_s8 (int8x8_t r, int8x8x2_t tab, int8x8_t idx)
{
int8x8_t result = r;
@@ -10844,7 +11662,8 @@ vtbx2_s8 (int8x8_t r, int8x8x2_t tab, int8x8_t idx)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_u8 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx)
{
uint8x8_t result = r;
@@ -10856,7 +11675,8 @@ vtbx2_u8 (uint8x8_t r, uint8x8x2_t tab, uint8x8_t idx)
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
{
poly8x8_t result = r;
@@ -10872,75 +11692,132 @@ vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
/* Start of optimal implementations in approved order. */
+/* vabd. */
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabds_f32 (float32_t __a, float32_t __b)
+{
+ return __builtin_aarch64_fabdsf (__a, __b);
+}
+
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdd_f64 (float64_t __a, float64_t __b)
+{
+ return __builtin_aarch64_fabddf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_fabdv2sf (__a, __b);
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t) {vabdd_f64 (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0))};
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_fabdv4sf (__a, __b);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_fabdv2df (__a, __b);
+}
+
/* vabs */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_f32 (float32x2_t __a)
{
return __builtin_aarch64_absv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_f64 (float64x1_t __a)
{
return (float64x1_t) {__builtin_fabs (__a[0])};
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s8 (int8x8_t __a)
{
return __builtin_aarch64_absv8qi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s16 (int16x4_t __a)
{
return __builtin_aarch64_absv4hi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s32 (int32x2_t __a)
{
return __builtin_aarch64_absv2si (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s64 (int64x1_t __a)
{
return (int64x1_t) {__builtin_aarch64_absdi (__a[0])};
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_f32 (float32x4_t __a)
{
return __builtin_aarch64_absv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_f64 (float64x2_t __a)
{
return __builtin_aarch64_absv2df (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s8 (int8x16_t __a)
{
return __builtin_aarch64_absv16qi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s16 (int16x8_t __a)
{
return __builtin_aarch64_absv8hi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s32 (int32x4_t __a)
{
return __builtin_aarch64_absv4si (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s64 (int64x2_t __a)
{
return __builtin_aarch64_absv2di (__a);
@@ -10948,13 +11825,15 @@ vabsq_s64 (int64x2_t __a)
/* vadd */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddd_s64 (int64_t __a, int64_t __b)
{
return __a + __b;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddd_u64 (uint64_t __a, uint64_t __b)
{
return __a + __b;
@@ -10962,103 +11841,120 @@ vaddd_u64 (uint64_t __a, uint64_t __b)
/* vaddv */
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_s8 (int8x8_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v8qi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_s16 (int16x4_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v4hi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_s32 (int32x2_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v2si (__a);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_u8 (uint8x8_t __a)
{
return (uint8_t) __builtin_aarch64_reduc_plus_scal_v8qi ((int8x8_t) __a);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_u16 (uint16x4_t __a)
{
return (uint16_t) __builtin_aarch64_reduc_plus_scal_v4hi ((int16x4_t) __a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_u32 (uint32x2_t __a)
{
return (int32_t) __builtin_aarch64_reduc_plus_scal_v2si ((int32x2_t) __a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s8 (int8x16_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v16qi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s16 (int16x8_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v8hi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s32 (int32x4_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v4si (__a);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s64 (int64x2_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v2di (__a);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u8 (uint8x16_t __a)
{
return (uint8_t) __builtin_aarch64_reduc_plus_scal_v16qi ((int8x16_t) __a);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u16 (uint16x8_t __a)
{
return (uint16_t) __builtin_aarch64_reduc_plus_scal_v8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u32 (uint32x4_t __a)
{
return (uint32_t) __builtin_aarch64_reduc_plus_scal_v4si ((int32x4_t) __a);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u64 (uint64x2_t __a)
{
return (uint64_t) __builtin_aarch64_reduc_plus_scal_v2di ((int64x2_t) __a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_f32 (float32x2_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v2sf (__a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_f32 (float32x4_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v4sf (__a);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_f64 (float64x2_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v2df (__a);
@@ -11066,368 +11962,456 @@ vaddvq_f64 (float64x2_t __a)
/* vbsl */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_f16 (uint16x4_t __a, float16x4_t __b, float16x4_t __c)
+{
+ return __builtin_aarch64_simd_bslv4hf_suss (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
{
return __builtin_aarch64_simd_bslv2sf_suss (__a, __b, __c);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_f64 (uint64x1_t __a, float64x1_t __b, float64x1_t __c)
{
return (float64x1_t)
{ __builtin_aarch64_simd_bsldf_suss (__a[0], __b[0], __c[0]) };
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
{
return __builtin_aarch64_simd_bslv8qi_pupp (__a, __b, __c);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
{
return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c);
}
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
+{
+ return (poly64x1_t)
+ {__builtin_aarch64_simd_bsldi_pupp (__a[0], __b[0], __c[0])};
+}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
{
return __builtin_aarch64_simd_bslv8qi_suss (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
{
return __builtin_aarch64_simd_bslv4hi_suss (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
{
return __builtin_aarch64_simd_bslv2si_suss (__a, __b, __c);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
{
return (int64x1_t)
{__builtin_aarch64_simd_bsldi_suss (__a[0], __b[0], __c[0])};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
return __builtin_aarch64_simd_bslv8qi_uuuu (__a, __b, __c);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
return __builtin_aarch64_simd_bslv4hi_uuuu (__a, __b, __c);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
return __builtin_aarch64_simd_bslv2si_uuuu (__a, __b, __c);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
{
return (uint64x1_t)
{__builtin_aarch64_simd_bsldi_uuuu (__a[0], __b[0], __c[0])};
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_f16 (uint16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_aarch64_simd_bslv8hf_suss (__a, __b, __c);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
{
return __builtin_aarch64_simd_bslv4sf_suss (__a, __b, __c);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_f64 (uint64x2_t __a, float64x2_t __b, float64x2_t __c)
{
return __builtin_aarch64_simd_bslv2df_suss (__a, __b, __c);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
{
return __builtin_aarch64_simd_bslv16qi_pupp (__a, __b, __c);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
{
return __builtin_aarch64_simd_bslv8hi_pupp (__a, __b, __c);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
{
return __builtin_aarch64_simd_bslv16qi_suss (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
{
return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
+{
+ return __builtin_aarch64_simd_bslv2di_pupp (__a, __b, __c);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
{
return __builtin_aarch64_simd_bslv4si_suss (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
{
return __builtin_aarch64_simd_bslv2di_suss (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
return __builtin_aarch64_simd_bslv16qi_uuuu (__a, __b, __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
return __builtin_aarch64_simd_bslv8hi_uuuu (__a, __b, __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
return __builtin_aarch64_simd_bslv4si_uuuu (__a, __b, __c);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
return __builtin_aarch64_simd_bslv2di_uuuu (__a, __b, __c);
}
-/* ARMv8.1 instrinsics. */
+/* ARMv8.1-A instrinsics. */
#pragma GCC push_options
#pragma GCC target ("arch=armv8.1-a")
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
return __builtin_aarch64_sqrdmlahv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
return __builtin_aarch64_sqrdmlahv2si (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
return __builtin_aarch64_sqrdmlahv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
return __builtin_aarch64_sqrdmlahv4si (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
return __builtin_aarch64_sqrdmlshv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
return __builtin_aarch64_sqrdmlshv2si (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
return __builtin_aarch64_sqrdmlshv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
return __builtin_aarch64_sqrdmlshv4si (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_laneqv4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_laneqv2si (__a, __b, __c, __d);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_laneqv8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_laneqv4si (__a, __b, __c, __d);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_laneqv4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_laneqv2si (__a, __b, __c, __d);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_laneqv8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_laneqv4si (__a, __b, __c, __d);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_lanev4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_lanev2si (__a, __b, __c, __d);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_lanev8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_lanev4si (__a, __b, __c, __d);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahh_s16 (int16_t __a, int16_t __b, int16_t __c)
{
return (int16_t) __builtin_aarch64_sqrdmlahhi (__a, __b, __c);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_lanehi (__a, __b, __c, __d);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_laneqhi (__a, __b, __c, __d);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahs_s32 (int32_t __a, int32_t __b, int32_t __c)
{
return (int32_t) __builtin_aarch64_sqrdmlahsi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_lanesi (__a, __b, __c, __d);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlah_laneqsi (__a, __b, __c, __d);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_lanev4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_lanev2si (__a, __b, __c, __d);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_lanev8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_lanev4si (__a, __b, __c, __d);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshh_s16 (int16_t __a, int16_t __b, int16_t __c)
{
return (int16_t) __builtin_aarch64_sqrdmlshhi (__a, __b, __c);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_lanehi (__a, __b, __c, __d);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_laneqhi (__a, __b, __c, __d);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshs_s32 (int32_t __a, int32_t __b, int32_t __c)
{
return (int32_t) __builtin_aarch64_sqrdmlshsi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_lanesi (__a, __b, __c, __d);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqrdmlsh_laneqsi (__a, __b, __c, __d);
@@ -11438,25 +12422,29 @@ vqrdmlshs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
#pragma GCC target ("+nothing+crypto")
/* vaes */
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaeseq_u8 (uint8x16_t data, uint8x16_t key)
{
return __builtin_aarch64_crypto_aesev16qi_uuu (data, key);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaesdq_u8 (uint8x16_t data, uint8x16_t key)
{
return __builtin_aarch64_crypto_aesdv16qi_uuu (data, key);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaesmcq_u8 (uint8x16_t data)
{
return __builtin_aarch64_crypto_aesmcv16qi_uu (data);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaesimcq_u8 (uint8x16_t data)
{
return __builtin_aarch64_crypto_aesimcv16qi_uu (data);
@@ -11465,37 +12453,43 @@ vaesimcq_u8 (uint8x16_t data)
/* vcage */
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcage_f64 (float64x1_t __a, float64x1_t __b)
{
return vabs_f64 (__a) >= vabs_f64 (__b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcages_f32 (float32_t __a, float32_t __b)
{
return __builtin_fabsf (__a) >= __builtin_fabsf (__b) ? -1 : 0;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcage_f32 (float32x2_t __a, float32x2_t __b)
{
return vabs_f32 (__a) >= vabs_f32 (__b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcageq_f32 (float32x4_t __a, float32x4_t __b)
{
return vabsq_f32 (__a) >= vabsq_f32 (__b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaged_f64 (float64_t __a, float64_t __b)
{
return __builtin_fabs (__a) >= __builtin_fabs (__b) ? -1 : 0;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcageq_f64 (float64x2_t __a, float64x2_t __b)
{
return vabsq_f64 (__a) >= vabsq_f64 (__b);
@@ -11503,37 +12497,43 @@ vcageq_f64 (float64x2_t __a, float64x2_t __b)
/* vcagt */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagts_f32 (float32_t __a, float32_t __b)
{
return __builtin_fabsf (__a) > __builtin_fabsf (__b) ? -1 : 0;
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagt_f32 (float32x2_t __a, float32x2_t __b)
{
return vabs_f32 (__a) > vabs_f32 (__b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagt_f64 (float64x1_t __a, float64x1_t __b)
{
return vabs_f64 (__a) > vabs_f64 (__b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagtq_f32 (float32x4_t __a, float32x4_t __b)
{
return vabsq_f32 (__a) > vabsq_f32 (__b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagtd_f64 (float64_t __a, float64_t __b)
{
return __builtin_fabs (__a) > __builtin_fabs (__b) ? -1 : 0;
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagtq_f64 (float64x2_t __a, float64x2_t __b)
{
return vabsq_f64 (__a) > vabsq_f64 (__b);
@@ -11541,37 +12541,43 @@ vcagtq_f64 (float64x2_t __a, float64x2_t __b)
/* vcale */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcale_f32 (float32x2_t __a, float32x2_t __b)
{
return vabs_f32 (__a) <= vabs_f32 (__b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcale_f64 (float64x1_t __a, float64x1_t __b)
{
return vabs_f64 (__a) <= vabs_f64 (__b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaled_f64 (float64_t __a, float64_t __b)
{
return __builtin_fabs (__a) <= __builtin_fabs (__b) ? -1 : 0;
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcales_f32 (float32_t __a, float32_t __b)
{
return __builtin_fabsf (__a) <= __builtin_fabsf (__b) ? -1 : 0;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaleq_f32 (float32x4_t __a, float32x4_t __b)
{
return vabsq_f32 (__a) <= vabsq_f32 (__b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaleq_f64 (float64x2_t __a, float64x2_t __b)
{
return vabsq_f64 (__a) <= vabsq_f64 (__b);
@@ -11579,37 +12585,43 @@ vcaleq_f64 (float64x2_t __a, float64x2_t __b)
/* vcalt */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcalt_f32 (float32x2_t __a, float32x2_t __b)
{
return vabs_f32 (__a) < vabs_f32 (__b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcalt_f64 (float64x1_t __a, float64x1_t __b)
{
return vabs_f64 (__a) < vabs_f64 (__b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaltd_f64 (float64_t __a, float64_t __b)
{
return __builtin_fabs (__a) < __builtin_fabs (__b) ? -1 : 0;
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaltq_f32 (float32x4_t __a, float32x4_t __b)
{
return vabsq_f32 (__a) < vabsq_f32 (__b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaltq_f64 (float64x2_t __a, float64x2_t __b)
{
return vabsq_f64 (__a) < vabsq_f64 (__b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcalts_f32 (float32_t __a, float32_t __b)
{
return __builtin_fabsf (__a) < __builtin_fabsf (__b) ? -1 : 0;
@@ -11617,133 +12629,162 @@ vcalts_f32 (float32_t __a, float32_t __b)
/* vceq - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_f32 (float32x2_t __a, float32x2_t __b)
{
return (uint32x2_t) (__a == __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_f64 (float64x1_t __a, float64x1_t __b)
{
return (uint64x1_t) (__a == __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_p8 (poly8x8_t __a, poly8x8_t __b)
{
return (uint8x8_t) (__a == __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_p64 (poly64x1_t __a, poly64x1_t __b)
+{
+ return (uint64x1_t) (__a == __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s8 (int8x8_t __a, int8x8_t __b)
{
return (uint8x8_t) (__a == __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s16 (int16x4_t __a, int16x4_t __b)
{
return (uint16x4_t) (__a == __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s32 (int32x2_t __a, int32x2_t __b)
{
return (uint32x2_t) (__a == __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s64 (int64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) (__a == __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (__a == __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (__a == __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (__a == __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (__a == __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_f32 (float32x4_t __a, float32x4_t __b)
{
return (uint32x4_t) (__a == __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_f64 (float64x2_t __a, float64x2_t __b)
{
return (uint64x2_t) (__a == __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
{
return (uint8x16_t) (__a == __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s8 (int8x16_t __a, int8x16_t __b)
{
return (uint8x16_t) (__a == __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s16 (int16x8_t __a, int16x8_t __b)
{
return (uint16x8_t) (__a == __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s32 (int32x4_t __a, int32x4_t __b)
{
return (uint32x4_t) (__a == __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s64 (int64x2_t __a, int64x2_t __b)
{
return (uint64x2_t) (__a == __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (__a == __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (__a == __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (__a == __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (__a == __b);
@@ -11751,25 +12792,29 @@ vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
/* vceq - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqs_f32 (float32_t __a, float32_t __b)
{
return __a == __b ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqd_s64 (int64_t __a, int64_t __b)
{
return __a == __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqd_u64 (uint64_t __a, uint64_t __b)
{
return __a == __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqd_f64 (float64_t __a, float64_t __b)
{
return __a == __b ? -1ll : 0ll;
@@ -11777,133 +12822,155 @@ vceqd_f64 (float64_t __a, float64_t __b)
/* vceqz - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_f32 (float32x2_t __a)
{
return (uint32x2_t) (__a == 0.0f);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_f64 (float64x1_t __a)
{
return (uint64x1_t) (__a == (float64x1_t) {0.0});
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_p8 (poly8x8_t __a)
{
return (uint8x8_t) (__a == 0);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_s8 (int8x8_t __a)
{
return (uint8x8_t) (__a == 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_s16 (int16x4_t __a)
{
return (uint16x4_t) (__a == 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_s32 (int32x2_t __a)
{
return (uint32x2_t) (__a == 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_s64 (int64x1_t __a)
{
return (uint64x1_t) (__a == __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_u8 (uint8x8_t __a)
{
return (__a == 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_u16 (uint16x4_t __a)
{
return (__a == 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_u32 (uint32x2_t __a)
{
return (__a == 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqz_u64 (uint64x1_t __a)
{
return (__a == __AARCH64_UINT64_C (0));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_f32 (float32x4_t __a)
{
return (uint32x4_t) (__a == 0.0f);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_f64 (float64x2_t __a)
{
return (uint64x2_t) (__a == 0.0f);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_p8 (poly8x16_t __a)
{
return (uint8x16_t) (__a == 0);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_s8 (int8x16_t __a)
{
return (uint8x16_t) (__a == 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_s16 (int16x8_t __a)
{
return (uint16x8_t) (__a == 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_s32 (int32x4_t __a)
{
return (uint32x4_t) (__a == 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_s64 (int64x2_t __a)
{
return (uint64x2_t) (__a == __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_u8 (uint8x16_t __a)
{
return (__a == 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_u16 (uint16x8_t __a)
{
return (__a == 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_u32 (uint32x4_t __a)
{
return (__a == 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_u64 (uint64x2_t __a)
{
return (__a == __AARCH64_UINT64_C (0));
@@ -11911,25 +12978,29 @@ vceqzq_u64 (uint64x2_t __a)
/* vceqz - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzs_f32 (float32_t __a)
{
return __a == 0.0f ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzd_s64 (int64_t __a)
{
return __a == 0 ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzd_u64 (uint64_t __a)
{
return __a == 0 ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzd_f64 (float64_t __a)
{
return __a == 0.0 ? -1ll : 0ll;
@@ -11937,121 +13008,141 @@ vceqzd_f64 (float64_t __a)
/* vcge - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_f32 (float32x2_t __a, float32x2_t __b)
{
return (uint32x2_t) (__a >= __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_f64 (float64x1_t __a, float64x1_t __b)
{
return (uint64x1_t) (__a >= __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_s8 (int8x8_t __a, int8x8_t __b)
{
return (uint8x8_t) (__a >= __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_s16 (int16x4_t __a, int16x4_t __b)
{
return (uint16x4_t) (__a >= __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_s32 (int32x2_t __a, int32x2_t __b)
{
return (uint32x2_t) (__a >= __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_s64 (int64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) (__a >= __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (__a >= __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (__a >= __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (__a >= __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcge_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (__a >= __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_f32 (float32x4_t __a, float32x4_t __b)
{
return (uint32x4_t) (__a >= __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_f64 (float64x2_t __a, float64x2_t __b)
{
return (uint64x2_t) (__a >= __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_s8 (int8x16_t __a, int8x16_t __b)
{
return (uint8x16_t) (__a >= __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_s16 (int16x8_t __a, int16x8_t __b)
{
return (uint16x8_t) (__a >= __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_s32 (int32x4_t __a, int32x4_t __b)
{
return (uint32x4_t) (__a >= __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_s64 (int64x2_t __a, int64x2_t __b)
{
return (uint64x2_t) (__a >= __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (__a >= __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (__a >= __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (__a >= __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (__a >= __b);
@@ -12059,25 +13150,29 @@ vcgeq_u64 (uint64x2_t __a, uint64x2_t __b)
/* vcge - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcges_f32 (float32_t __a, float32_t __b)
{
return __a >= __b ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcged_s64 (int64_t __a, int64_t __b)
{
return __a >= __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcged_u64 (uint64_t __a, uint64_t __b)
{
return __a >= __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcged_f64 (float64_t __a, float64_t __b)
{
return __a >= __b ? -1ll : 0ll;
@@ -12085,73 +13180,85 @@ vcged_f64 (float64_t __a, float64_t __b)
/* vcgez - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgez_f32 (float32x2_t __a)
{
return (uint32x2_t) (__a >= 0.0f);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgez_f64 (float64x1_t __a)
{
return (uint64x1_t) (__a[0] >= (float64x1_t) {0.0});
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgez_s8 (int8x8_t __a)
{
return (uint8x8_t) (__a >= 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgez_s16 (int16x4_t __a)
{
return (uint16x4_t) (__a >= 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgez_s32 (int32x2_t __a)
{
return (uint32x2_t) (__a >= 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgez_s64 (int64x1_t __a)
{
return (uint64x1_t) (__a >= __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezq_f32 (float32x4_t __a)
{
return (uint32x4_t) (__a >= 0.0f);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezq_f64 (float64x2_t __a)
{
return (uint64x2_t) (__a >= 0.0);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezq_s8 (int8x16_t __a)
{
return (uint8x16_t) (__a >= 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezq_s16 (int16x8_t __a)
{
return (uint16x8_t) (__a >= 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezq_s32 (int32x4_t __a)
{
return (uint32x4_t) (__a >= 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezq_s64 (int64x2_t __a)
{
return (uint64x2_t) (__a >= __AARCH64_INT64_C (0));
@@ -12159,19 +13266,22 @@ vcgezq_s64 (int64x2_t __a)
/* vcgez - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezs_f32 (float32_t __a)
{
return __a >= 0.0f ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezd_s64 (int64_t __a)
{
return __a >= 0 ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezd_f64 (float64_t __a)
{
return __a >= 0.0 ? -1ll : 0ll;
@@ -12179,121 +13289,141 @@ vcgezd_f64 (float64_t __a)
/* vcgt - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_f32 (float32x2_t __a, float32x2_t __b)
{
return (uint32x2_t) (__a > __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_f64 (float64x1_t __a, float64x1_t __b)
{
return (uint64x1_t) (__a > __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_s8 (int8x8_t __a, int8x8_t __b)
{
return (uint8x8_t) (__a > __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_s16 (int16x4_t __a, int16x4_t __b)
{
return (uint16x4_t) (__a > __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_s32 (int32x2_t __a, int32x2_t __b)
{
return (uint32x2_t) (__a > __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_s64 (int64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) (__a > __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (__a > __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (__a > __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (__a > __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgt_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (__a > __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_f32 (float32x4_t __a, float32x4_t __b)
{
return (uint32x4_t) (__a > __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_f64 (float64x2_t __a, float64x2_t __b)
{
return (uint64x2_t) (__a > __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_s8 (int8x16_t __a, int8x16_t __b)
{
return (uint8x16_t) (__a > __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_s16 (int16x8_t __a, int16x8_t __b)
{
return (uint16x8_t) (__a > __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_s32 (int32x4_t __a, int32x4_t __b)
{
return (uint32x4_t) (__a > __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_s64 (int64x2_t __a, int64x2_t __b)
{
return (uint64x2_t) (__a > __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (__a > __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (__a > __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (__a > __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (__a > __b);
@@ -12301,25 +13431,29 @@ vcgtq_u64 (uint64x2_t __a, uint64x2_t __b)
/* vcgt - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgts_f32 (float32_t __a, float32_t __b)
{
return __a > __b ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtd_s64 (int64_t __a, int64_t __b)
{
return __a > __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtd_u64 (uint64_t __a, uint64_t __b)
{
return __a > __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtd_f64 (float64_t __a, float64_t __b)
{
return __a > __b ? -1ll : 0ll;
@@ -12327,73 +13461,85 @@ vcgtd_f64 (float64_t __a, float64_t __b)
/* vcgtz - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtz_f32 (float32x2_t __a)
{
return (uint32x2_t) (__a > 0.0f);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtz_f64 (float64x1_t __a)
{
return (uint64x1_t) (__a > (float64x1_t) {0.0});
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtz_s8 (int8x8_t __a)
{
return (uint8x8_t) (__a > 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtz_s16 (int16x4_t __a)
{
return (uint16x4_t) (__a > 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtz_s32 (int32x2_t __a)
{
return (uint32x2_t) (__a > 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtz_s64 (int64x1_t __a)
{
return (uint64x1_t) (__a > __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzq_f32 (float32x4_t __a)
{
return (uint32x4_t) (__a > 0.0f);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzq_f64 (float64x2_t __a)
{
return (uint64x2_t) (__a > 0.0);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzq_s8 (int8x16_t __a)
{
return (uint8x16_t) (__a > 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzq_s16 (int16x8_t __a)
{
return (uint16x8_t) (__a > 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzq_s32 (int32x4_t __a)
{
return (uint32x4_t) (__a > 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzq_s64 (int64x2_t __a)
{
return (uint64x2_t) (__a > __AARCH64_INT64_C (0));
@@ -12401,19 +13547,22 @@ vcgtzq_s64 (int64x2_t __a)
/* vcgtz - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzs_f32 (float32_t __a)
{
return __a > 0.0f ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzd_s64 (int64_t __a)
{
return __a > 0 ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzd_f64 (float64_t __a)
{
return __a > 0.0 ? -1ll : 0ll;
@@ -12421,121 +13570,141 @@ vcgtzd_f64 (float64_t __a)
/* vcle - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_f32 (float32x2_t __a, float32x2_t __b)
{
return (uint32x2_t) (__a <= __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_f64 (float64x1_t __a, float64x1_t __b)
{
return (uint64x1_t) (__a <= __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_s8 (int8x8_t __a, int8x8_t __b)
{
return (uint8x8_t) (__a <= __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_s16 (int16x4_t __a, int16x4_t __b)
{
return (uint16x4_t) (__a <= __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_s32 (int32x2_t __a, int32x2_t __b)
{
return (uint32x2_t) (__a <= __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_s64 (int64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) (__a <= __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (__a <= __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (__a <= __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (__a <= __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcle_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (__a <= __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_f32 (float32x4_t __a, float32x4_t __b)
{
return (uint32x4_t) (__a <= __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_f64 (float64x2_t __a, float64x2_t __b)
{
return (uint64x2_t) (__a <= __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_s8 (int8x16_t __a, int8x16_t __b)
{
return (uint8x16_t) (__a <= __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_s16 (int16x8_t __a, int16x8_t __b)
{
return (uint16x8_t) (__a <= __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_s32 (int32x4_t __a, int32x4_t __b)
{
return (uint32x4_t) (__a <= __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_s64 (int64x2_t __a, int64x2_t __b)
{
return (uint64x2_t) (__a <= __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (__a <= __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (__a <= __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (__a <= __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcleq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (__a <= __b);
@@ -12543,25 +13712,29 @@ vcleq_u64 (uint64x2_t __a, uint64x2_t __b)
/* vcle - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcles_f32 (float32_t __a, float32_t __b)
{
return __a <= __b ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcled_s64 (int64_t __a, int64_t __b)
{
return __a <= __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcled_u64 (uint64_t __a, uint64_t __b)
{
return __a <= __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcled_f64 (float64_t __a, float64_t __b)
{
return __a <= __b ? -1ll : 0ll;
@@ -12569,73 +13742,85 @@ vcled_f64 (float64_t __a, float64_t __b)
/* vclez - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclez_f32 (float32x2_t __a)
{
return (uint32x2_t) (__a <= 0.0f);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclez_f64 (float64x1_t __a)
{
return (uint64x1_t) (__a <= (float64x1_t) {0.0});
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclez_s8 (int8x8_t __a)
{
return (uint8x8_t) (__a <= 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclez_s16 (int16x4_t __a)
{
return (uint16x4_t) (__a <= 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclez_s32 (int32x2_t __a)
{
return (uint32x2_t) (__a <= 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclez_s64 (int64x1_t __a)
{
return (uint64x1_t) (__a <= __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezq_f32 (float32x4_t __a)
{
return (uint32x4_t) (__a <= 0.0f);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezq_f64 (float64x2_t __a)
{
return (uint64x2_t) (__a <= 0.0);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezq_s8 (int8x16_t __a)
{
return (uint8x16_t) (__a <= 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezq_s16 (int16x8_t __a)
{
return (uint16x8_t) (__a <= 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezq_s32 (int32x4_t __a)
{
return (uint32x4_t) (__a <= 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezq_s64 (int64x2_t __a)
{
return (uint64x2_t) (__a <= __AARCH64_INT64_C (0));
@@ -12643,19 +13828,22 @@ vclezq_s64 (int64x2_t __a)
/* vclez - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezs_f32 (float32_t __a)
{
return __a <= 0.0f ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezd_s64 (int64_t __a)
{
return __a <= 0 ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezd_f64 (float64_t __a)
{
return __a <= 0.0 ? -1ll : 0ll;
@@ -12663,121 +13851,141 @@ vclezd_f64 (float64_t __a)
/* vclt - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_f32 (float32x2_t __a, float32x2_t __b)
{
return (uint32x2_t) (__a < __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_f64 (float64x1_t __a, float64x1_t __b)
{
return (uint64x1_t) (__a < __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s8 (int8x8_t __a, int8x8_t __b)
{
return (uint8x8_t) (__a < __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s16 (int16x4_t __a, int16x4_t __b)
{
return (uint16x4_t) (__a < __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s32 (int32x2_t __a, int32x2_t __b)
{
return (uint32x2_t) (__a < __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s64 (int64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) (__a < __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (__a < __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (__a < __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (__a < __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u64 (uint64x1_t __a, uint64x1_t __b)
{
return (__a < __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_f32 (float32x4_t __a, float32x4_t __b)
{
return (uint32x4_t) (__a < __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_f64 (float64x2_t __a, float64x2_t __b)
{
return (uint64x2_t) (__a < __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s8 (int8x16_t __a, int8x16_t __b)
{
return (uint8x16_t) (__a < __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s16 (int16x8_t __a, int16x8_t __b)
{
return (uint16x8_t) (__a < __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s32 (int32x4_t __a, int32x4_t __b)
{
return (uint32x4_t) (__a < __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s64 (int64x2_t __a, int64x2_t __b)
{
return (uint64x2_t) (__a < __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (__a < __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (__a < __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (__a < __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return (__a < __b);
@@ -12785,25 +13993,29 @@ vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
/* vclt - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclts_f32 (float32_t __a, float32_t __b)
{
return __a < __b ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltd_s64 (int64_t __a, int64_t __b)
{
return __a < __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltd_u64 (uint64_t __a, uint64_t __b)
{
return __a < __b ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltd_f64 (float64_t __a, float64_t __b)
{
return __a < __b ? -1ll : 0ll;
@@ -12811,73 +14023,85 @@ vcltd_f64 (float64_t __a, float64_t __b)
/* vcltz - vector. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_f32 (float32x2_t __a)
{
return (uint32x2_t) (__a < 0.0f);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_f64 (float64x1_t __a)
{
return (uint64x1_t) (__a < (float64x1_t) {0.0});
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s8 (int8x8_t __a)
{
return (uint8x8_t) (__a < 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s16 (int16x4_t __a)
{
return (uint16x4_t) (__a < 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s32 (int32x2_t __a)
{
return (uint32x2_t) (__a < 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s64 (int64x1_t __a)
{
return (uint64x1_t) (__a < __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_f32 (float32x4_t __a)
{
return (uint32x4_t) (__a < 0.0f);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_f64 (float64x2_t __a)
{
return (uint64x2_t) (__a < 0.0);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s8 (int8x16_t __a)
{
return (uint8x16_t) (__a < 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s16 (int16x8_t __a)
{
return (uint16x8_t) (__a < 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s32 (int32x4_t __a)
{
return (uint32x4_t) (__a < 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s64 (int64x2_t __a)
{
return (uint64x2_t) (__a < __AARCH64_INT64_C (0));
@@ -12885,19 +14109,22 @@ vcltzq_s64 (int64x2_t __a)
/* vcltz - scalar. */
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzs_f32 (float32_t __a)
{
return __a < 0.0f ? -1 : 0;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzd_s64 (int64_t __a)
{
return __a < 0 ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzd_f64 (float64_t __a)
{
return __a < 0.0 ? -1ll : 0ll;
@@ -12905,37 +14132,43 @@ vcltzd_f64 (float64_t __a)
/* vcls. */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_s8 (int8x8_t __a)
{
return __builtin_aarch64_clrsbv8qi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_s16 (int16x4_t __a)
{
return __builtin_aarch64_clrsbv4hi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_s32 (int32x2_t __a)
{
return __builtin_aarch64_clrsbv2si (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s8 (int8x16_t __a)
{
return __builtin_aarch64_clrsbv16qi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s16 (int16x8_t __a)
{
return __builtin_aarch64_clrsbv8hi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s32 (int32x4_t __a)
{
return __builtin_aarch64_clrsbv4si (__a);
@@ -12943,73 +14176,85 @@ vclsq_s32 (int32x4_t __a)
/* vclz. */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_s8 (int8x8_t __a)
{
return __builtin_aarch64_clzv8qi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_s16 (int16x4_t __a)
{
return __builtin_aarch64_clzv4hi (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_s32 (int32x2_t __a)
{
return __builtin_aarch64_clzv2si (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_u8 (uint8x8_t __a)
{
return (uint8x8_t)__builtin_aarch64_clzv8qi ((int8x8_t)__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_u16 (uint16x4_t __a)
{
return (uint16x4_t)__builtin_aarch64_clzv4hi ((int16x4_t)__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_u32 (uint32x2_t __a)
{
return (uint32x2_t)__builtin_aarch64_clzv2si ((int32x2_t)__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s8 (int8x16_t __a)
{
return __builtin_aarch64_clzv16qi (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s16 (int16x8_t __a)
{
return __builtin_aarch64_clzv8hi (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s32 (int32x4_t __a)
{
return __builtin_aarch64_clzv4si (__a);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u8 (uint8x16_t __a)
{
return (uint8x16_t)__builtin_aarch64_clzv16qi ((int8x16_t)__a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u16 (uint16x8_t __a)
{
return (uint16x8_t)__builtin_aarch64_clzv8hi ((int16x8_t)__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u32 (uint32x4_t __a)
{
return (uint32x4_t)__builtin_aarch64_clzv4si ((int32x4_t)__a);
@@ -13017,63 +14262,549 @@ vclzq_u32 (uint32x4_t __a)
/* vcnt. */
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcnt_p8 (poly8x8_t __a)
{
return (poly8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcnt_s8 (int8x8_t __a)
{
return __builtin_aarch64_popcountv8qi (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcnt_u8 (uint8x8_t __a)
{
return (uint8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcntq_p8 (poly8x16_t __a)
{
return (poly8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcntq_s8 (int8x16_t __a)
{
return __builtin_aarch64_popcountv16qi (__a);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcntq_u8 (uint8x16_t __a)
{
return (uint8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a);
}
+/* vcopy_lane. */
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_f32 (float32x2_t __a, const int __lane1,
+ float32x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_f64 (float64x1_t __a, const int __lane1,
+ float64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_p8 (poly8x8_t __a, const int __lane1,
+ poly8x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_p16 (poly16x4_t __a, const int __lane1,
+ poly16x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_p64 (poly64x1_t __a, const int __lane1,
+ poly64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_s8 (int8x8_t __a, const int __lane1,
+ int8x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_s16 (int16x4_t __a, const int __lane1,
+ int16x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_s32 (int32x2_t __a, const int __lane1,
+ int32x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_s64 (int64x1_t __a, const int __lane1,
+ int64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_u8 (uint8x8_t __a, const int __lane1,
+ uint8x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_u16 (uint16x4_t __a, const int __lane1,
+ uint16x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_u32 (uint32x2_t __a, const int __lane1,
+ uint32x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_lane_u64 (uint64x1_t __a, const int __lane1,
+ uint64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+/* vcopy_laneq. */
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_f32 (float32x2_t __a, const int __lane1,
+ float32x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_f64 (float64x1_t __a, const int __lane1,
+ float64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_p8 (poly8x8_t __a, const int __lane1,
+ poly8x16_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_p16 (poly16x4_t __a, const int __lane1,
+ poly16x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_p64 (poly64x1_t __a, const int __lane1,
+ poly64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_s8 (int8x8_t __a, const int __lane1,
+ int8x16_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_s16 (int16x4_t __a, const int __lane1,
+ int16x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_s32 (int32x2_t __a, const int __lane1,
+ int32x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_s64 (int64x1_t __a, const int __lane1,
+ int64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_u8 (uint8x8_t __a, const int __lane1,
+ uint8x16_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_u16 (uint16x4_t __a, const int __lane1,
+ uint16x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_u32 (uint32x2_t __a, const int __lane1,
+ uint32x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopy_laneq_u64 (uint64x1_t __a, const int __lane1,
+ uint64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+/* vcopyq_lane. */
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_f32 (float32x4_t __a, const int __lane1,
+ float32x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_f64 (float64x2_t __a, const int __lane1,
+ float64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_p8 (poly8x16_t __a, const int __lane1,
+ poly8x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_p16 (poly16x8_t __a, const int __lane1,
+ poly16x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_p64 (poly64x2_t __a, const int __lane1,
+ poly64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_s8 (int8x16_t __a, const int __lane1,
+ int8x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_s16 (int16x8_t __a, const int __lane1,
+ int16x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_s32 (int32x4_t __a, const int __lane1,
+ int32x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_s64 (int64x2_t __a, const int __lane1,
+ int64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_u8 (uint8x16_t __a, const int __lane1,
+ uint8x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_u16 (uint16x8_t __a, const int __lane1,
+ uint16x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_u32 (uint32x4_t __a, const int __lane1,
+ uint32x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_lane_u64 (uint64x2_t __a, const int __lane1,
+ uint64x1_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+/* vcopyq_laneq. */
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_f32 (float32x4_t __a, const int __lane1,
+ float32x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_f64 (float64x2_t __a, const int __lane1,
+ float64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_p8 (poly8x16_t __a, const int __lane1,
+ poly8x16_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_p16 (poly16x8_t __a, const int __lane1,
+ poly16x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_p64 (poly64x2_t __a, const int __lane1,
+ poly64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_s8 (int8x16_t __a, const int __lane1,
+ int8x16_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_s16 (int16x8_t __a, const int __lane1,
+ int16x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_s32 (int32x4_t __a, const int __lane1,
+ int32x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_s64 (int64x2_t __a, const int __lane1,
+ int64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_u8 (uint8x16_t __a, const int __lane1,
+ uint8x16_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_u16 (uint16x8_t __a, const int __lane1,
+ uint16x8_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_u32 (uint32x4_t __a, const int __lane1,
+ uint32x4_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcopyq_laneq_u64 (uint64x2_t __a, const int __lane1,
+ uint64x2_t __b, const int __lane2)
+{
+ return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
+ __a, __lane1);
+}
+
/* vcvt (double -> float). */
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f16_f32 (float32x4_t __a)
{
return __builtin_aarch64_float_truncate_lo_v4hf (__a);
}
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f16_f32 (float16x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_float_truncate_hi_v8hf (__a, __b);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_f64 (float64x2_t __a)
{
return __builtin_aarch64_float_truncate_lo_v2sf (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f32_f64 (float32x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_float_truncate_hi_v4sf (__a, __b);
@@ -13081,88 +14812,292 @@ vcvt_high_f32_f64 (float32x2_t __a, float64x2_t __b)
/* vcvt (float -> double). */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_f16 (float16x4_t __a)
{
return __builtin_aarch64_float_extend_lo_v4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f64_f32 (float32x2_t __a)
{
return __builtin_aarch64_float_extend_lo_v2df (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f32_f16 (float16x8_t __a)
{
return __builtin_aarch64_vec_unpacks_hi_v8hf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f64_f32 (float32x4_t __a)
{
return __builtin_aarch64_vec_unpacks_hi_v4sf (__a);
}
+/* vcvt (<u>fixed-point -> float). */
+
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtd_n_f64_s64 (int64_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfdi (__a, __b);
+}
+
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtd_n_f64_u64 (uint64_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfdi_sus (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvts_n_f32_s32 (int32_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfsi (__a, __b);
+}
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvts_n_f32_u32 (uint32_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfsi_sus (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f32_s32 (int32x2_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfv2si (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfv2si_sus (__a, __b);
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f64_s64 (int64x1_t __a, const int __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_scvtfdi (vget_lane_s64 (__a, 0), __b) };
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f64_u64 (uint64x1_t __a, const int __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_ucvtfdi_sus (vget_lane_u64 (__a, 0), __b) };
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfv4si (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfv4si_sus (__a, __b);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f64_s64 (int64x2_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfv2di (__a, __b);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f64_u64 (uint64x2_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfv2di_sus (__a, __b);
+}
+
+/* vcvt (float -> <u>fixed-point). */
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtd_n_s64_f64 (float64_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzsdf (__a, __b);
+}
+
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtd_n_u64_f64 (float64_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzudf_uss (__a, __b);
+}
+
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvts_n_s32_f32 (float32_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzssf (__a, __b);
+}
+
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvts_n_u32_f32 (float32_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzusf_uss (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_s32_f32 (float32x2_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzsv2sf (__a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_u32_f32 (float32x2_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuv2sf_uss (__a, __b);
+}
+
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_s64_f64 (float64x1_t __a, const int __b)
+{
+ return (int64x1_t)
+ { __builtin_aarch64_fcvtzsdf (vget_lane_f64 (__a, 0), __b) };
+}
+
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_u64_f64 (float64x1_t __a, const int __b)
+{
+ return (uint64x1_t)
+ { __builtin_aarch64_fcvtzudf_uss (vget_lane_f64 (__a, 0), __b) };
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzsv4sf (__a, __b);
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuv4sf_uss (__a, __b);
+}
+
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_s64_f64 (float64x2_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzsv2df (__a, __b);
+}
+
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_u64_f64 (float64x2_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuv2df_uss (__a, __b);
+}
+
/* vcvt (<u>int -> float) */
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_f64_s64 (int64_t __a)
{
return (float64_t) __a;
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_f64_u64 (uint64_t __a)
{
return (float64_t) __a;
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_f32_s32 (int32_t __a)
{
return (float32_t) __a;
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_f32_u32 (uint32_t __a)
{
return (float32_t) __a;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_s32 (int32x2_t __a)
{
return __builtin_aarch64_floatv2siv2sf (__a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_u32 (uint32x2_t __a)
{
return __builtin_aarch64_floatunsv2siv2sf ((int32x2_t) __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f64_s64 (int64x1_t __a)
+{
+ return (float64x1_t) { vget_lane_s64 (__a, 0) };
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f64_u64 (uint64x1_t __a)
+{
+ return (float64x1_t) { vget_lane_u64 (__a, 0) };
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f32_s32 (int32x4_t __a)
{
return __builtin_aarch64_floatv4siv4sf (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f32_u32 (uint32x4_t __a)
{
return __builtin_aarch64_floatunsv4siv4sf ((int32x4_t) __a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f64_s64 (int64x2_t __a)
{
return __builtin_aarch64_floatv2div2df (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f64_u64 (uint64x2_t __a)
{
return __builtin_aarch64_floatunsv2div2df ((int64x2_t) __a);
@@ -13170,73 +15105,85 @@ vcvtq_f64_u64 (uint64x2_t __a)
/* vcvt (float -> <u>int) */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_s64_f64 (float64_t __a)
{
return (int64_t) __a;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_u64_f64 (float64_t __a)
{
return (uint64_t) __a;
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_s32_f32 (float32_t __a)
{
return (int32_t) __a;
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_u32_f32 (float32_t __a)
{
return (uint32_t) __a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_s32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lbtruncv2sfv2si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_u32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lbtruncuv2sfv2si_us (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_s32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lbtruncv4sfv4si (__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_u32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lbtruncuv4sfv4si_us (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_s64_f64 (float64x1_t __a)
{
return (int64x1_t) {vcvtd_s64_f64 (__a[0])};
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_u64_f64 (float64x1_t __a)
{
return (uint64x1_t) {vcvtd_u64_f64 (__a[0])};
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_s64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lbtruncv2dfv2di (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_u64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lbtruncuv2dfv2di_us (__a);
@@ -13244,73 +15191,85 @@ vcvtq_u64_f64 (float64x2_t __a)
/* vcvta */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtad_s64_f64 (float64_t __a)
{
return __builtin_aarch64_lrounddfdi (__a);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtad_u64_f64 (float64_t __a)
{
return __builtin_aarch64_lroundudfdi_us (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtas_s32_f32 (float32_t __a)
{
return __builtin_aarch64_lroundsfsi (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtas_u32_f32 (float32_t __a)
{
return __builtin_aarch64_lroundusfsi_us (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_s32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lroundv2sfv2si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_u32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lrounduv2sfv2si_us (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_s32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lroundv4sfv4si (__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_u32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lrounduv4sfv4si_us (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_s64_f64 (float64x1_t __a)
{
return (int64x1_t) {vcvtad_s64_f64 (__a[0])};
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_u64_f64 (float64x1_t __a)
{
return (uint64x1_t) {vcvtad_u64_f64 (__a[0])};
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_s64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lroundv2dfv2di (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_u64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lrounduv2dfv2di_us (__a);
@@ -13318,73 +15277,85 @@ vcvtaq_u64_f64 (float64x2_t __a)
/* vcvtm */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmd_s64_f64 (float64_t __a)
{
return __builtin_llfloor (__a);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmd_u64_f64 (float64_t __a)
{
return __builtin_aarch64_lfloorudfdi_us (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtms_s32_f32 (float32_t __a)
{
return __builtin_ifloorf (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtms_u32_f32 (float32_t __a)
{
return __builtin_aarch64_lfloorusfsi_us (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_s32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lfloorv2sfv2si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_u32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lflooruv2sfv2si_us (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_s32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lfloorv4sfv4si (__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_u32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lflooruv4sfv4si_us (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_s64_f64 (float64x1_t __a)
{
return (int64x1_t) {vcvtmd_s64_f64 (__a[0])};
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_u64_f64 (float64x1_t __a)
{
return (uint64x1_t) {vcvtmd_u64_f64 (__a[0])};
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_s64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lfloorv2dfv2di (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_u64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lflooruv2dfv2di_us (__a);
@@ -13392,73 +15363,85 @@ vcvtmq_u64_f64 (float64x2_t __a)
/* vcvtn */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnd_s64_f64 (float64_t __a)
{
return __builtin_aarch64_lfrintndfdi (__a);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnd_u64_f64 (float64_t __a)
{
return __builtin_aarch64_lfrintnudfdi_us (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtns_s32_f32 (float32_t __a)
{
return __builtin_aarch64_lfrintnsfsi (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtns_u32_f32 (float32_t __a)
{
return __builtin_aarch64_lfrintnusfsi_us (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_s32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lfrintnv2sfv2si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_u32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lfrintnuv2sfv2si_us (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_s32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lfrintnv4sfv4si (__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_u32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lfrintnuv4sfv4si_us (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_s64_f64 (float64x1_t __a)
{
return (int64x1_t) {vcvtnd_s64_f64 (__a[0])};
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_u64_f64 (float64x1_t __a)
{
return (uint64x1_t) {vcvtnd_u64_f64 (__a[0])};
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_s64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lfrintnv2dfv2di (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_u64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lfrintnuv2dfv2di_us (__a);
@@ -13466,73 +15449,85 @@ vcvtnq_u64_f64 (float64x2_t __a)
/* vcvtp */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpd_s64_f64 (float64_t __a)
{
return __builtin_llceil (__a);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpd_u64_f64 (float64_t __a)
{
return __builtin_aarch64_lceiludfdi_us (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtps_s32_f32 (float32_t __a)
{
return __builtin_iceilf (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtps_u32_f32 (float32_t __a)
{
return __builtin_aarch64_lceilusfsi_us (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_s32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lceilv2sfv2si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_u32_f32 (float32x2_t __a)
{
return __builtin_aarch64_lceiluv2sfv2si_us (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_s32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lceilv4sfv4si (__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_u32_f32 (float32x4_t __a)
{
return __builtin_aarch64_lceiluv4sfv4si_us (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_s64_f64 (float64x1_t __a)
{
return (int64x1_t) {vcvtpd_s64_f64 (__a[0])};
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_u64_f64 (float64x1_t __a)
{
return (uint64x1_t) {vcvtpd_u64_f64 (__a[0])};
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_s64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lceilv2dfv2di (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_u64_f64 (float64x2_t __a)
{
return __builtin_aarch64_lceiluv2dfv2di_us (__a);
@@ -13540,73 +15535,99 @@ vcvtpq_u64_f64 (float64x2_t __a)
/* vdup_n */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_f16 (float16_t __a)
+{
+ return (float16x4_t) {__a, __a, __a, __a};
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_f32 (float32_t __a)
{
return (float32x2_t) {__a, __a};
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_f64 (float64_t __a)
{
return (float64x1_t) {__a};
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p8 (poly8_t __a)
{
return (poly8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p16 (poly16_t __a)
{
return (poly16x4_t) {__a, __a, __a, __a};
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_n_p64 (poly64_t __a)
+{
+ return (poly64x1_t) {__a};
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s8 (int8_t __a)
{
return (int8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s16 (int16_t __a)
{
return (int16x4_t) {__a, __a, __a, __a};
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s32 (int32_t __a)
{
return (int32x2_t) {__a, __a};
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s64 (int64_t __a)
{
return (int64x1_t) {__a};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u8 (uint8_t __a)
{
return (uint8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u16 (uint16_t __a)
{
return (uint16x4_t) {__a, __a, __a, __a};
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u32 (uint32_t __a)
{
return (uint32x2_t) {__a, __a};
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u64 (uint64_t __a)
{
return (uint64x1_t) {__a};
@@ -13614,76 +15635,102 @@ vdup_n_u64 (uint64_t __a)
/* vdupq_n */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_f16 (float16_t __a)
+{
+ return (float16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_f32 (float32_t __a)
{
return (float32x4_t) {__a, __a, __a, __a};
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_f64 (float64_t __a)
{
return (float64x2_t) {__a, __a};
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p8 (uint32_t __a)
{
return (poly8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p16 (uint32_t __a)
{
return (poly16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_n_p64 (uint64_t __a)
+{
+ return (poly64x2_t) {__a, __a};
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s8 (int32_t __a)
{
return (int8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s16 (int32_t __a)
{
return (int16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s32 (int32_t __a)
{
return (int32x4_t) {__a, __a, __a, __a};
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s64 (int64_t __a)
{
return (int64x2_t) {__a, __a};
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u8 (uint32_t __a)
{
return (uint8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u16 (uint32_t __a)
{
return (uint16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u32 (uint32_t __a)
{
return (uint32x4_t) {__a, __a, __a, __a};
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u64 (uint64_t __a)
{
return (uint64x2_t) {__a, __a};
@@ -13691,73 +15738,99 @@ vdupq_n_u64 (uint64_t __a)
/* vdup_lane */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_f16 (float16x4_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_f16 (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_f32 (float32x2_t __a, const int __b)
{
return __aarch64_vdup_lane_f32 (__a, __b);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_f64 (float64x1_t __a, const int __b)
{
return __aarch64_vdup_lane_f64 (__a, __b);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p8 (poly8x8_t __a, const int __b)
{
return __aarch64_vdup_lane_p8 (__a, __b);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p16 (poly16x4_t __a, const int __b)
{
return __aarch64_vdup_lane_p16 (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return __aarch64_vdup_lane_p64 (__a, __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s8 (int8x8_t __a, const int __b)
{
return __aarch64_vdup_lane_s8 (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s16 (int16x4_t __a, const int __b)
{
return __aarch64_vdup_lane_s16 (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s32 (int32x2_t __a, const int __b)
{
return __aarch64_vdup_lane_s32 (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s64 (int64x1_t __a, const int __b)
{
return __aarch64_vdup_lane_s64 (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u8 (uint8x8_t __a, const int __b)
{
return __aarch64_vdup_lane_u8 (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u16 (uint16x4_t __a, const int __b)
{
return __aarch64_vdup_lane_u16 (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u32 (uint32x2_t __a, const int __b)
{
return __aarch64_vdup_lane_u32 (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u64 (uint64x1_t __a, const int __b)
{
return __aarch64_vdup_lane_u64 (__a, __b);
@@ -13765,297 +15838,398 @@ vdup_lane_u64 (uint64x1_t __a, const int __b)
/* vdup_laneq */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_laneq_f16 (float16x8_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_f32 (float32x4_t __a, const int __b)
{
return __aarch64_vdup_laneq_f32 (__a, __b);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_f64 (float64x2_t __a, const int __b)
{
return __aarch64_vdup_laneq_f64 (__a, __b);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_p8 (poly8x16_t __a, const int __b)
{
return __aarch64_vdup_laneq_p8 (__a, __b);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_p16 (poly16x8_t __a, const int __b)
{
return __aarch64_vdup_laneq_p16 (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdup_laneq_p64 (poly64x2_t __a, const int __b)
+{
+ return __aarch64_vdup_laneq_p64 (__a, __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s8 (int8x16_t __a, const int __b)
{
return __aarch64_vdup_laneq_s8 (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s16 (int16x8_t __a, const int __b)
{
return __aarch64_vdup_laneq_s16 (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s32 (int32x4_t __a, const int __b)
{
return __aarch64_vdup_laneq_s32 (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s64 (int64x2_t __a, const int __b)
{
return __aarch64_vdup_laneq_s64 (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u8 (uint8x16_t __a, const int __b)
{
return __aarch64_vdup_laneq_u8 (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u16 (uint16x8_t __a, const int __b)
{
return __aarch64_vdup_laneq_u16 (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u32 (uint32x4_t __a, const int __b)
{
return __aarch64_vdup_laneq_u32 (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u64 (uint64x2_t __a, const int __b)
{
return __aarch64_vdup_laneq_u64 (__a, __b);
}
/* vdupq_lane */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_f16 (float16x4_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_f16 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_f32 (float32x2_t __a, const int __b)
{
return __aarch64_vdupq_lane_f32 (__a, __b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_f64 (float64x1_t __a, const int __b)
{
return __aarch64_vdupq_lane_f64 (__a, __b);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p8 (poly8x8_t __a, const int __b)
{
return __aarch64_vdupq_lane_p8 (__a, __b);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p16 (poly16x4_t __a, const int __b)
{
return __aarch64_vdupq_lane_p16 (__a, __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_lane_p64 (poly64x1_t __a, const int __b)
+{
+ return __aarch64_vdupq_lane_p64 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s8 (int8x8_t __a, const int __b)
{
return __aarch64_vdupq_lane_s8 (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s16 (int16x4_t __a, const int __b)
{
return __aarch64_vdupq_lane_s16 (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s32 (int32x2_t __a, const int __b)
{
return __aarch64_vdupq_lane_s32 (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s64 (int64x1_t __a, const int __b)
{
return __aarch64_vdupq_lane_s64 (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u8 (uint8x8_t __a, const int __b)
{
return __aarch64_vdupq_lane_u8 (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u16 (uint16x4_t __a, const int __b)
{
return __aarch64_vdupq_lane_u16 (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u32 (uint32x2_t __a, const int __b)
{
return __aarch64_vdupq_lane_u32 (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u64 (uint64x1_t __a, const int __b)
{
return __aarch64_vdupq_lane_u64 (__a, __b);
}
/* vdupq_laneq */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_laneq_f16 (float16x8_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_f16 (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_f32 (float32x4_t __a, const int __b)
{
return __aarch64_vdupq_laneq_f32 (__a, __b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_f64 (float64x2_t __a, const int __b)
{
return __aarch64_vdupq_laneq_f64 (__a, __b);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_p8 (poly8x16_t __a, const int __b)
{
return __aarch64_vdupq_laneq_p8 (__a, __b);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_p16 (poly16x8_t __a, const int __b)
{
return __aarch64_vdupq_laneq_p16 (__a, __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupq_laneq_p64 (poly64x2_t __a, const int __b)
+{
+ return __aarch64_vdupq_laneq_p64 (__a, __b);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s8 (int8x16_t __a, const int __b)
{
return __aarch64_vdupq_laneq_s8 (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s16 (int16x8_t __a, const int __b)
{
return __aarch64_vdupq_laneq_s16 (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s32 (int32x4_t __a, const int __b)
{
return __aarch64_vdupq_laneq_s32 (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s64 (int64x2_t __a, const int __b)
{
return __aarch64_vdupq_laneq_s64 (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u8 (uint8x16_t __a, const int __b)
{
return __aarch64_vdupq_laneq_u8 (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u16 (uint16x8_t __a, const int __b)
{
return __aarch64_vdupq_laneq_u16 (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u32 (uint32x4_t __a, const int __b)
{
return __aarch64_vdupq_laneq_u32 (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u64 (uint64x2_t __a, const int __b)
{
return __aarch64_vdupq_laneq_u64 (__a, __b);
}
/* vdupb_lane */
-__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_lane_p8 (poly8x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_lane_s8 (int8x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_lane_u8 (uint8x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
/* vduph_lane */
-__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vduph_lane_f16 (float16x4_t __a, const int __b)
+{
+ return __aarch64_vget_lane_any (__a, __b);
+}
+
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_lane_p16 (poly16x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_lane_s16 (int16x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_lane_u16 (uint16x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
/* vdups_lane */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_lane_f32 (float32x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_lane_s32 (int32x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_lane_u32 (uint32x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
/* vdupd_lane */
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_lane_f64 (float64x1_t __a, const int __b)
{
__AARCH64_LANE_CHECK (__a, __b);
return __a[0];
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_lane_s64 (int64x1_t __a, const int __b)
{
__AARCH64_LANE_CHECK (__a, __b);
return __a[0];
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_lane_u64 (uint64x1_t __a, const int __b)
{
__AARCH64_LANE_CHECK (__a, __b);
@@ -14063,76 +16237,97 @@ vdupd_lane_u64 (uint64x1_t __a, const int __b)
}
/* vdupb_laneq */
-__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_laneq_p8 (poly8x16_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
-vdupb_laneq_s8 (int8x16_t __a, const int __attribute__ ((unused)) __b)
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdupb_laneq_s8 (int8x16_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_laneq_u8 (uint8x16_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
/* vduph_laneq */
-__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vduph_laneq_f16 (float16x8_t __a, const int __b)
+{
+ return __aarch64_vget_lane_any (__a, __b);
+}
+
+__extension__ extern __inline poly16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_laneq_p16 (poly16x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_laneq_s16 (int16x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_laneq_u16 (uint16x8_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
/* vdups_laneq */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_laneq_f32 (float32x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_laneq_s32 (int32x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_laneq_u32 (uint32x4_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
/* vdupd_laneq */
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_laneq_f64 (float64x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_laneq_s64 (int64x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_laneq_u64 (uint64x2_t __a, const int __b)
{
return __aarch64_vget_lane_any (__a, __b);
@@ -14140,7 +16335,22 @@ vdupd_laneq_u64 (uint64x2_t __a, const int __b)
/* vext */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_f16 (float16x4_t __a, float16x4_t __b, __const int __c)
+{
+ __AARCH64_LANE_CHECK (__a, __c);
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__b, __a,
+ (uint16x4_t) {4 - __c, 5 - __c, 6 - __c, 7 - __c});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint16x4_t) {__c, __c + 1, __c + 2, __c + 3});
+#endif
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14151,14 +16361,16 @@ vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
#endif
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_f64 (float64x1_t __a, float64x1_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
/* The only possible index to the assembler instruction returns element 0. */
return __a;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14171,7 +16383,8 @@ vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
#endif
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14183,7 +16396,17 @@ vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
#endif
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vext_p64 (poly64x1_t __a, poly64x1_t __b, __const int __c)
+{
+ __AARCH64_LANE_CHECK (__a, __c);
+ /* The only possible index to the assembler instruction returns element 0. */
+ return __a;
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14196,7 +16419,8 @@ vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
#endif
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14208,7 +16432,8 @@ vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
#endif
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14219,7 +16444,8 @@ vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
#endif
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s64 (int64x1_t __a, int64x1_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14227,7 +16453,8 @@ vext_s64 (int64x1_t __a, int64x1_t __b, __const int __c)
return __a;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14240,7 +16467,8 @@ vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
#endif
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14252,7 +16480,8 @@ vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14263,7 +16492,8 @@ vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
#endif
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u64 (uint64x1_t __a, uint64x1_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14271,7 +16501,25 @@ vext_u64 (uint64x1_t __a, uint64x1_t __b, __const int __c)
return __a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_f16 (float16x8_t __a, float16x8_t __b, __const int __c)
+{
+ __AARCH64_LANE_CHECK (__a, __c);
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__b, __a,
+ (uint16x8_t) {8 - __c, 9 - __c, 10 - __c, 11 - __c,
+ 12 - __c, 13 - __c, 14 - __c,
+ 15 - __c});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint16x8_t) {__c, __c + 1, __c + 2, __c + 3,
+ __c + 4, __c + 5, __c + 6, __c + 7});
+#endif
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14283,7 +16531,8 @@ vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c)
#endif
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14294,7 +16543,8 @@ vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c)
#endif
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14309,7 +16559,8 @@ vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c)
#endif
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14322,7 +16573,20 @@ vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
#endif
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vextq_p64 (poly64x2_t __a, poly64x2_t __b, __const int __c)
+{
+ __AARCH64_LANE_CHECK (__a, __c);
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
+#else
+ return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1});
+#endif
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14337,7 +16601,8 @@ vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
#endif
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14350,7 +16615,8 @@ vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c)
#endif
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14362,7 +16628,8 @@ vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c)
#endif
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14373,7 +16640,8 @@ vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c)
#endif
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14388,7 +16656,8 @@ vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c)
#endif
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14401,7 +16670,8 @@ vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c)
#endif
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14413,7 +16683,8 @@ vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c)
#endif
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vextq_u64 (uint64x2_t __a, uint64x2_t __b, __const int __c)
{
__AARCH64_LANE_CHECK (__a, __c);
@@ -14426,43 +16697,57 @@ vextq_u64 (uint64x2_t __a, uint64x2_t __b, __const int __c)
/* vfma */
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfma_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
{
return (float64x1_t) {__builtin_fma (__b[0], __c[0], __a[0])};
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfma_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
return __builtin_aarch64_fmav2sf (__b, __c, __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
return __builtin_aarch64_fmav4sf (__b, __c, __a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
{
return __builtin_aarch64_fmav2df (__b, __c, __a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfma_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
{
return __builtin_aarch64_fmav2sf (__b, vdup_n_f32 (__c), __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfma_n_f64 (float64x1_t __a, float64x1_t __b, float64_t __c)
+{
+ return (float64x1_t) {__b[0] * __c + __a[0]};
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
return __builtin_aarch64_fmav4sf (__b, vdupq_n_f32 (__c), __a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_n_f64 (float64x2_t __a, float64x2_t __b, float64_t __c)
{
return __builtin_aarch64_fmav2df (__b, vdupq_n_f64 (__c), __a);
@@ -14470,7 +16755,8 @@ vfmaq_n_f64 (float64x2_t __a, float64x2_t __b, float64_t __c)
/* vfma_lane */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfma_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
@@ -14479,21 +16765,24 @@ vfma_lane_f32 (float32x2_t __a, float32x2_t __b,
__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfma_lane_f64 (float64x1_t __a, float64x1_t __b,
float64x1_t __c, const int __lane)
{
return (float64x1_t) {__builtin_fma (__b[0], __c[0], __a[0])};
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmad_lane_f64 (float64_t __a, float64_t __b,
float64x1_t __c, const int __lane)
{
return __builtin_fma (__b, __c[0], __a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmas_lane_f32 (float32_t __a, float32_t __b,
float32x2_t __c, const int __lane)
{
@@ -14502,7 +16791,8 @@ vfmas_lane_f32 (float32_t __a, float32_t __b,
/* vfma_laneq */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfma_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
@@ -14511,7 +16801,8 @@ vfma_laneq_f32 (float32x2_t __a, float32x2_t __b,
__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfma_laneq_f64 (float64x1_t __a, float64x1_t __b,
float64x2_t __c, const int __lane)
{
@@ -14519,14 +16810,16 @@ vfma_laneq_f64 (float64x1_t __a, float64x1_t __b,
return (float64x1_t) {__builtin_fma (__b[0], __c0, __a[0])};
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmad_laneq_f64 (float64_t __a, float64_t __b,
float64x2_t __c, const int __lane)
{
return __builtin_fma (__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmas_laneq_f32 (float32_t __a, float32_t __b,
float32x4_t __c, const int __lane)
{
@@ -14535,7 +16828,8 @@ vfmas_laneq_f32 (float32_t __a, float32_t __b,
/* vfmaq_lane */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
@@ -14544,7 +16838,8 @@ vfmaq_lane_f32 (float32x4_t __a, float32x4_t __b,
__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_lane_f64 (float64x2_t __a, float64x2_t __b,
float64x1_t __c, const int __lane)
{
@@ -14553,7 +16848,8 @@ vfmaq_lane_f64 (float64x2_t __a, float64x2_t __b,
/* vfmaq_laneq */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
@@ -14562,7 +16858,8 @@ vfmaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_laneq_f64 (float64x2_t __a, float64x2_t __b,
float64x2_t __c, const int __lane)
{
@@ -14573,34 +16870,66 @@ vfmaq_laneq_f64 (float64x2_t __a, float64x2_t __b,
/* vfms */
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfms_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
{
return (float64x1_t) {__builtin_fma (-__b[0], __c[0], __a[0])};
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfms_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
return __builtin_aarch64_fmav2sf (-__b, __c, __a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
return __builtin_aarch64_fmav4sf (-__b, __c, __a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
{
return __builtin_aarch64_fmav2df (-__b, __c, __a);
}
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+ return __builtin_aarch64_fmav2sf (-__b, vdup_n_f32 (__c), __a);
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_n_f64 (float64x1_t __a, float64x1_t __b, float64_t __c)
+{
+ return (float64x1_t) {-__b[0] * __c + __a[0]};
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+ return __builtin_aarch64_fmav4sf (-__b, vdupq_n_f32 (__c), __a);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_n_f64 (float64x2_t __a, float64x2_t __b, float64_t __c)
+{
+ return __builtin_aarch64_fmav2df (-__b, vdupq_n_f64 (__c), __a);
+}
/* vfms_lane */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfms_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
@@ -14609,21 +16938,24 @@ vfms_lane_f32 (float32x2_t __a, float32x2_t __b,
__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfms_lane_f64 (float64x1_t __a, float64x1_t __b,
float64x1_t __c, const int __lane)
{
return (float64x1_t) {__builtin_fma (-__b[0], __c[0], __a[0])};
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsd_lane_f64 (float64_t __a, float64_t __b,
float64x1_t __c, const int __lane)
{
return __builtin_fma (-__b, __c[0], __a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmss_lane_f32 (float32_t __a, float32_t __b,
float32x2_t __c, const int __lane)
{
@@ -14632,7 +16964,8 @@ vfmss_lane_f32 (float32_t __a, float32_t __b,
/* vfms_laneq */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfms_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
@@ -14641,7 +16974,8 @@ vfms_laneq_f32 (float32x2_t __a, float32x2_t __b,
__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfms_laneq_f64 (float64x1_t __a, float64x1_t __b,
float64x2_t __c, const int __lane)
{
@@ -14649,14 +16983,16 @@ vfms_laneq_f64 (float64x1_t __a, float64x1_t __b,
return (float64x1_t) {__builtin_fma (-__b[0], __c0, __a[0])};
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsd_laneq_f64 (float64_t __a, float64_t __b,
float64x2_t __c, const int __lane)
{
return __builtin_fma (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmss_laneq_f32 (float32_t __a, float32_t __b,
float32x4_t __c, const int __lane)
{
@@ -14665,7 +17001,8 @@ vfmss_laneq_f32 (float32_t __a, float32_t __b,
/* vfmsq_lane */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
@@ -14674,7 +17011,8 @@ vfmsq_lane_f32 (float32x4_t __a, float32x4_t __b,
__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_lane_f64 (float64x2_t __a, float64x2_t __b,
float64x1_t __c, const int __lane)
{
@@ -14683,7 +17021,8 @@ vfmsq_lane_f64 (float64x2_t __a, float64x2_t __b,
/* vfmsq_laneq */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
@@ -14692,7 +17031,8 @@ vfmsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_laneq_f64 (float64x2_t __a, float64x2_t __b,
float64x2_t __c, const int __lane)
{
@@ -14703,84 +17043,104 @@ vfmsq_laneq_f64 (float64x2_t __a, float64x2_t __b,
/* vld1 */
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_f16 (const float16_t *__a)
{
return __builtin_aarch64_ld1v4hf (__a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_f32 (const float32_t *a)
{
return __builtin_aarch64_ld1v2sf ((const __builtin_aarch64_simd_sf *) a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_f64 (const float64_t *a)
{
return (float64x1_t) {*a};
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_p8 (const poly8_t *a)
{
return (poly8x8_t)
__builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_p16 (const poly16_t *a)
{
return (poly16x4_t)
__builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_p64 (const poly64_t *a)
+{
+ return (poly64x1_t) {*a};
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_s8 (const int8_t *a)
{
return __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_s16 (const int16_t *a)
{
return __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_s32 (const int32_t *a)
{
return __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_s64 (const int64_t *a)
{
return (int64x1_t) {*a};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_u8 (const uint8_t *a)
{
return (uint8x8_t)
__builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_u16 (const uint16_t *a)
{
return (uint16x4_t)
__builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_u32 (const uint32_t *a)
{
return (uint32x2_t)
__builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_u64 (const uint64_t *a)
{
return (uint64x1_t) {*a};
@@ -14788,84 +17148,105 @@ vld1_u64 (const uint64_t *a)
/* vld1q */
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_f16 (const float16_t *__a)
{
return __builtin_aarch64_ld1v8hf (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_f32 (const float32_t *a)
{
return __builtin_aarch64_ld1v4sf ((const __builtin_aarch64_simd_sf *) a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_f64 (const float64_t *a)
{
return __builtin_aarch64_ld1v2df ((const __builtin_aarch64_simd_df *) a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p8 (const poly8_t *a)
{
return (poly8x16_t)
__builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p16 (const poly16_t *a)
{
return (poly16x8_t)
__builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_p64 (const poly64_t *a)
+{
+ return (poly64x2_t)
+ __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s8 (const int8_t *a)
{
return __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s16 (const int16_t *a)
{
return __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s32 (const int32_t *a)
{
return __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s64 (const int64_t *a)
{
return __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) a);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u8 (const uint8_t *a)
{
return (uint8x16_t)
__builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u16 (const uint16_t *a)
{
return (uint16x8_t)
__builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u32 (const uint32_t *a)
{
return (uint32x4_t)
__builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u64 (const uint64_t *a)
{
return (uint64x2_t)
@@ -14874,80 +17255,99 @@ vld1q_u64 (const uint64_t *a)
/* vld1_dup */
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_f16 (const float16_t* __a)
{
- float16_t __f = *__a;
- return (float16x4_t) { __f, __f, __f, __f };
+ return vdup_n_f16 (*__a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_f32 (const float32_t* __a)
{
return vdup_n_f32 (*__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_f64 (const float64_t* __a)
{
return vdup_n_f64 (*__a);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_p8 (const poly8_t* __a)
{
return vdup_n_p8 (*__a);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_p16 (const poly16_t* __a)
{
return vdup_n_p16 (*__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_dup_p64 (const poly64_t* __a)
+{
+ return vdup_n_p64 (*__a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s8 (const int8_t* __a)
{
return vdup_n_s8 (*__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s16 (const int16_t* __a)
{
return vdup_n_s16 (*__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s32 (const int32_t* __a)
{
return vdup_n_s32 (*__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s64 (const int64_t* __a)
{
return vdup_n_s64 (*__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u8 (const uint8_t* __a)
{
return vdup_n_u8 (*__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u16 (const uint16_t* __a)
{
return vdup_n_u16 (*__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u32 (const uint32_t* __a)
{
return vdup_n_u32 (*__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u64 (const uint64_t* __a)
{
return vdup_n_u64 (*__a);
@@ -14955,80 +17355,99 @@ vld1_dup_u64 (const uint64_t* __a)
/* vld1q_dup */
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_f16 (const float16_t* __a)
{
- float16_t __f = *__a;
- return (float16x8_t) { __f, __f, __f, __f, __f, __f, __f, __f };
+ return vdupq_n_f16 (*__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_f32 (const float32_t* __a)
{
return vdupq_n_f32 (*__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_f64 (const float64_t* __a)
{
return vdupq_n_f64 (*__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_p8 (const poly8_t* __a)
{
return vdupq_n_p8 (*__a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_p16 (const poly16_t* __a)
{
return vdupq_n_p16 (*__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_dup_p64 (const poly64_t* __a)
+{
+ return vdupq_n_p64 (*__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s8 (const int8_t* __a)
{
return vdupq_n_s8 (*__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s16 (const int16_t* __a)
{
return vdupq_n_s16 (*__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s32 (const int32_t* __a)
{
return vdupq_n_s32 (*__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s64 (const int64_t* __a)
{
return vdupq_n_s64 (*__a);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u8 (const uint8_t* __a)
{
return vdupq_n_u8 (*__a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u16 (const uint16_t* __a)
{
return vdupq_n_u16 (*__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u32 (const uint32_t* __a)
{
return vdupq_n_u32 (*__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u64 (const uint64_t* __a)
{
return vdupq_n_u64 (*__a);
@@ -15036,79 +17455,99 @@ vld1q_dup_u64 (const uint64_t* __a)
/* vld1_lane */
-__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_f16 (const float16_t *__src, float16x4_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_f32 (const float32_t *__src, float32x2_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_f64 (const float64_t *__src, float64x1_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_p8 (const poly8_t *__src, poly8x8_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_p16 (const poly16_t *__src, poly16x4_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1_lane_p64 (const poly64_t *__src, poly64x1_t __vec, const int __lane)
+{
+ return __aarch64_vset_lane_any (*__src, __vec, __lane);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s8 (const int8_t *__src, int8x8_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s16 (const int16_t *__src, int16x4_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s32 (const int32_t *__src, int32x2_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s64 (const int64_t *__src, int64x1_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u8 (const uint8_t *__src, uint8x8_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u16 (const uint16_t *__src, uint16x4_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u32 (const uint32_t *__src, uint32x2_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u64 (const uint64_t *__src, uint64x1_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
@@ -15116,79 +17555,99 @@ vld1_lane_u64 (const uint64_t *__src, uint64x1_t __vec, const int __lane)
/* vld1q_lane */
-__extension__ static __inline float16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_f16 (const float16_t *__src, float16x8_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_f32 (const float32_t *__src, float32x4_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_f64 (const float64_t *__src, float64x2_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_p8 (const poly8_t *__src, poly8x16_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_p16 (const poly16_t *__src, poly16x8_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld1q_lane_p64 (const poly64_t *__src, poly64x2_t __vec, const int __lane)
+{
+ return __aarch64_vset_lane_any (*__src, __vec, __lane);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s8 (const int8_t *__src, int8x16_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s16 (const int16_t *__src, int16x8_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s32 (const int32_t *__src, int32x4_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s64 (const int64_t *__src, int64x2_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u8 (const uint8_t *__src, uint8x16_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u16 (const uint16_t *__src, uint16x8_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u32 (const uint32_t *__src, uint32x4_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u64 (const uint64_t *__src, uint64x2_t __vec, const int __lane)
{
return __aarch64_vset_lane_any (*__src, __vec, __lane);
@@ -15196,7 +17655,8 @@ vld1q_lane_u64 (const uint64_t *__src, uint64x2_t __vec, const int __lane)
/* vldn */
-__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_s64 (const int64_t * __a)
{
int64x1x2_t ret;
@@ -15207,7 +17667,8 @@ vld2_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_u64 (const uint64_t * __a)
{
uint64x1x2_t ret;
@@ -15218,7 +17679,8 @@ vld2_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_f64 (const float64_t * __a)
{
float64x1x2_t ret;
@@ -15229,7 +17691,8 @@ vld2_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_s8 (const int8_t * __a)
{
int8x8x2_t ret;
@@ -15240,7 +17703,8 @@ vld2_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_p8 (const poly8_t * __a)
{
poly8x8x2_t ret;
@@ -15251,7 +17715,20 @@ vld2_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_p64 (const poly64_t * __a)
+{
+ poly64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 0);
+ ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_s16 (const int16_t * __a)
{
int16x4x2_t ret;
@@ -15262,7 +17739,8 @@ vld2_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_p16 (const poly16_t * __a)
{
poly16x4x2_t ret;
@@ -15273,7 +17751,8 @@ vld2_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_s32 (const int32_t * __a)
{
int32x2x2_t ret;
@@ -15284,7 +17763,8 @@ vld2_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_u8 (const uint8_t * __a)
{
uint8x8x2_t ret;
@@ -15295,7 +17775,8 @@ vld2_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_u16 (const uint16_t * __a)
{
uint16x4x2_t ret;
@@ -15306,7 +17787,8 @@ vld2_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_u32 (const uint32_t * __a)
{
uint32x2x2_t ret;
@@ -15317,7 +17799,8 @@ vld2_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline float16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_f16 (const float16_t * __a)
{
float16x4x2_t ret;
@@ -15328,7 +17811,8 @@ vld2_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_f32 (const float32_t * __a)
{
float32x2x2_t ret;
@@ -15339,7 +17823,8 @@ vld2_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_s8 (const int8_t * __a)
{
int8x16x2_t ret;
@@ -15350,7 +17835,8 @@ vld2q_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_p8 (const poly8_t * __a)
{
poly8x16x2_t ret;
@@ -15361,7 +17847,8 @@ vld2q_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_s16 (const int16_t * __a)
{
int16x8x2_t ret;
@@ -15372,7 +17859,8 @@ vld2q_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_p16 (const poly16_t * __a)
{
poly16x8x2_t ret;
@@ -15383,7 +17871,20 @@ vld2q_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_p64 (const poly64_t * __a)
+{
+ poly64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 0);
+ ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_s32 (const int32_t * __a)
{
int32x4x2_t ret;
@@ -15394,7 +17895,8 @@ vld2q_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_s64 (const int64_t * __a)
{
int64x2x2_t ret;
@@ -15405,7 +17907,8 @@ vld2q_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_u8 (const uint8_t * __a)
{
uint8x16x2_t ret;
@@ -15416,7 +17919,8 @@ vld2q_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_u16 (const uint16_t * __a)
{
uint16x8x2_t ret;
@@ -15427,7 +17931,8 @@ vld2q_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_u32 (const uint32_t * __a)
{
uint32x4x2_t ret;
@@ -15438,7 +17943,8 @@ vld2q_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_u64 (const uint64_t * __a)
{
uint64x2x2_t ret;
@@ -15449,7 +17955,8 @@ vld2q_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_f16 (const float16_t * __a)
{
float16x8x2_t ret;
@@ -15460,7 +17967,8 @@ vld2q_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_f32 (const float32_t * __a)
{
float32x4x2_t ret;
@@ -15471,7 +17979,8 @@ vld2q_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_f64 (const float64_t * __a)
{
float64x2x2_t ret;
@@ -15482,7 +17991,8 @@ vld2q_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_s64 (const int64_t * __a)
{
int64x1x3_t ret;
@@ -15494,7 +18004,8 @@ vld3_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_u64 (const uint64_t * __a)
{
uint64x1x3_t ret;
@@ -15506,7 +18017,8 @@ vld3_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_f64 (const float64_t * __a)
{
float64x1x3_t ret;
@@ -15518,7 +18030,8 @@ vld3_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_s8 (const int8_t * __a)
{
int8x8x3_t ret;
@@ -15530,7 +18043,8 @@ vld3_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_p8 (const poly8_t * __a)
{
poly8x8x3_t ret;
@@ -15542,7 +18056,8 @@ vld3_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_s16 (const int16_t * __a)
{
int16x4x3_t ret;
@@ -15554,7 +18069,8 @@ vld3_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_p16 (const poly16_t * __a)
{
poly16x4x3_t ret;
@@ -15566,7 +18082,8 @@ vld3_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_s32 (const int32_t * __a)
{
int32x2x3_t ret;
@@ -15578,7 +18095,8 @@ vld3_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_u8 (const uint8_t * __a)
{
uint8x8x3_t ret;
@@ -15590,7 +18108,8 @@ vld3_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_u16 (const uint16_t * __a)
{
uint16x4x3_t ret;
@@ -15602,7 +18121,8 @@ vld3_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_u32 (const uint32_t * __a)
{
uint32x2x3_t ret;
@@ -15614,7 +18134,8 @@ vld3_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline float16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_f16 (const float16_t * __a)
{
float16x4x3_t ret;
@@ -15626,7 +18147,8 @@ vld3_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_f32 (const float32_t * __a)
{
float32x2x3_t ret;
@@ -15638,7 +18160,21 @@ vld3_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_p64 (const poly64_t * __a)
+{
+ poly64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 0);
+ ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 1);
+ ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 2);
+ return ret;
+}
+
+__extension__ extern __inline int8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_s8 (const int8_t * __a)
{
int8x16x3_t ret;
@@ -15650,7 +18186,8 @@ vld3q_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_p8 (const poly8_t * __a)
{
poly8x16x3_t ret;
@@ -15662,7 +18199,8 @@ vld3q_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_s16 (const int16_t * __a)
{
int16x8x3_t ret;
@@ -15674,7 +18212,8 @@ vld3q_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_p16 (const poly16_t * __a)
{
poly16x8x3_t ret;
@@ -15686,7 +18225,8 @@ vld3q_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_s32 (const int32_t * __a)
{
int32x4x3_t ret;
@@ -15698,7 +18238,8 @@ vld3q_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_s64 (const int64_t * __a)
{
int64x2x3_t ret;
@@ -15710,7 +18251,8 @@ vld3q_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_u8 (const uint8_t * __a)
{
uint8x16x3_t ret;
@@ -15722,7 +18264,8 @@ vld3q_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_u16 (const uint16_t * __a)
{
uint16x8x3_t ret;
@@ -15734,7 +18277,8 @@ vld3q_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_u32 (const uint32_t * __a)
{
uint32x4x3_t ret;
@@ -15746,7 +18290,8 @@ vld3q_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_u64 (const uint64_t * __a)
{
uint64x2x3_t ret;
@@ -15758,7 +18303,8 @@ vld3q_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_f16 (const float16_t * __a)
{
float16x8x3_t ret;
@@ -15770,7 +18316,8 @@ vld3q_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_f32 (const float32_t * __a)
{
float32x4x3_t ret;
@@ -15782,7 +18329,8 @@ vld3q_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_f64 (const float64_t * __a)
{
float64x2x3_t ret;
@@ -15794,7 +18342,21 @@ vld3q_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_p64 (const poly64_t * __a)
+{
+ poly64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 0);
+ ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 1);
+ ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 2);
+ return ret;
+}
+
+__extension__ extern __inline int64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_s64 (const int64_t * __a)
{
int64x1x4_t ret;
@@ -15807,7 +18369,8 @@ vld4_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_u64 (const uint64_t * __a)
{
uint64x1x4_t ret;
@@ -15820,7 +18383,8 @@ vld4_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_f64 (const float64_t * __a)
{
float64x1x4_t ret;
@@ -15833,7 +18397,8 @@ vld4_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_s8 (const int8_t * __a)
{
int8x8x4_t ret;
@@ -15846,7 +18411,8 @@ vld4_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_p8 (const poly8_t * __a)
{
poly8x8x4_t ret;
@@ -15859,7 +18425,8 @@ vld4_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_s16 (const int16_t * __a)
{
int16x4x4_t ret;
@@ -15872,7 +18439,8 @@ vld4_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_p16 (const poly16_t * __a)
{
poly16x4x4_t ret;
@@ -15885,7 +18453,8 @@ vld4_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_s32 (const int32_t * __a)
{
int32x2x4_t ret;
@@ -15898,7 +18467,8 @@ vld4_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_u8 (const uint8_t * __a)
{
uint8x8x4_t ret;
@@ -15911,7 +18481,8 @@ vld4_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_u16 (const uint16_t * __a)
{
uint16x4x4_t ret;
@@ -15924,7 +18495,8 @@ vld4_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_u32 (const uint32_t * __a)
{
uint32x2x4_t ret;
@@ -15937,7 +18509,8 @@ vld4_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline float16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_f16 (const float16_t * __a)
{
float16x4x4_t ret;
@@ -15950,7 +18523,8 @@ vld4_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_f32 (const float32_t * __a)
{
float32x2x4_t ret;
@@ -15963,7 +18537,22 @@ vld4_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_p64 (const poly64_t * __a)
+{
+ poly64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 0);
+ ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 1);
+ ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 2);
+ ret.val[3] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 3);
+ return ret;
+}
+
+__extension__ extern __inline int8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_s8 (const int8_t * __a)
{
int8x16x4_t ret;
@@ -15976,7 +18565,8 @@ vld4q_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_p8 (const poly8_t * __a)
{
poly8x16x4_t ret;
@@ -15989,7 +18579,8 @@ vld4q_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_s16 (const int16_t * __a)
{
int16x8x4_t ret;
@@ -16002,7 +18593,8 @@ vld4q_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_p16 (const poly16_t * __a)
{
poly16x8x4_t ret;
@@ -16015,7 +18607,8 @@ vld4q_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_s32 (const int32_t * __a)
{
int32x4x4_t ret;
@@ -16028,7 +18621,8 @@ vld4q_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_s64 (const int64_t * __a)
{
int64x2x4_t ret;
@@ -16041,7 +18635,8 @@ vld4q_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_u8 (const uint8_t * __a)
{
uint8x16x4_t ret;
@@ -16054,7 +18649,8 @@ vld4q_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_u16 (const uint16_t * __a)
{
uint16x8x4_t ret;
@@ -16067,7 +18663,8 @@ vld4q_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_u32 (const uint32_t * __a)
{
uint32x4x4_t ret;
@@ -16080,7 +18677,8 @@ vld4q_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_u64 (const uint64_t * __a)
{
uint64x2x4_t ret;
@@ -16093,7 +18691,8 @@ vld4q_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_f16 (const float16_t * __a)
{
float16x8x4_t ret;
@@ -16106,7 +18705,8 @@ vld4q_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_f32 (const float32_t * __a)
{
float32x4x4_t ret;
@@ -16119,7 +18719,8 @@ vld4q_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_f64 (const float64_t * __a)
{
float64x2x4_t ret;
@@ -16132,9 +18733,24 @@ vld4q_f64 (const float64_t * __a)
return ret;
}
+__extension__ extern __inline poly64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_p64 (const poly64_t * __a)
+{
+ poly64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 0);
+ ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 1);
+ ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 2);
+ ret.val[3] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 3);
+ return ret;
+}
+
/* vldn_dup */
-__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s8 (const int8_t * __a)
{
int8x8x2_t ret;
@@ -16145,7 +18761,8 @@ vld2_dup_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s16 (const int16_t * __a)
{
int16x4x2_t ret;
@@ -16156,7 +18773,8 @@ vld2_dup_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s32 (const int32_t * __a)
{
int32x2x2_t ret;
@@ -16167,7 +18785,8 @@ vld2_dup_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline float16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_f16 (const float16_t * __a)
{
float16x4x2_t ret;
@@ -16178,7 +18797,8 @@ vld2_dup_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_f32 (const float32_t * __a)
{
float32x2x2_t ret;
@@ -16189,7 +18809,8 @@ vld2_dup_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline float64x1x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_f64 (const float64_t * __a)
{
float64x1x2_t ret;
@@ -16200,7 +18821,8 @@ vld2_dup_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u8 (const uint8_t * __a)
{
uint8x8x2_t ret;
@@ -16211,7 +18833,8 @@ vld2_dup_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u16 (const uint16_t * __a)
{
uint16x4x2_t ret;
@@ -16222,7 +18845,8 @@ vld2_dup_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u32 (const uint32_t * __a)
{
uint32x2x2_t ret;
@@ -16233,7 +18857,8 @@ vld2_dup_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_p8 (const poly8_t * __a)
{
poly8x8x2_t ret;
@@ -16244,7 +18869,8 @@ vld2_dup_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_p16 (const poly16_t * __a)
{
poly16x4x2_t ret;
@@ -16255,7 +18881,21 @@ vld2_dup_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int64x1x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2_dup_p64 (const poly64_t * __a)
+{
+ poly64x1x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 0);
+ ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 1);
+ return ret;
+}
+
+
+__extension__ extern __inline int64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s64 (const int64_t * __a)
{
int64x1x2_t ret;
@@ -16266,7 +18906,8 @@ vld2_dup_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint64x1x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u64 (const uint64_t * __a)
{
uint64x1x2_t ret;
@@ -16277,7 +18918,8 @@ vld2_dup_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_s8 (const int8_t * __a)
{
int8x16x2_t ret;
@@ -16288,7 +18930,8 @@ vld2q_dup_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_p8 (const poly8_t * __a)
{
poly8x16x2_t ret;
@@ -16299,7 +18942,8 @@ vld2q_dup_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_s16 (const int16_t * __a)
{
int16x8x2_t ret;
@@ -16310,7 +18954,8 @@ vld2q_dup_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_p16 (const poly16_t * __a)
{
poly16x8x2_t ret;
@@ -16321,7 +18966,8 @@ vld2q_dup_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_s32 (const int32_t * __a)
{
int32x4x2_t ret;
@@ -16332,7 +18978,8 @@ vld2q_dup_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline int64x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_s64 (const int64_t * __a)
{
int64x2x2_t ret;
@@ -16343,7 +18990,8 @@ vld2q_dup_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_u8 (const uint8_t * __a)
{
uint8x16x2_t ret;
@@ -16354,7 +19002,8 @@ vld2q_dup_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_u16 (const uint16_t * __a)
{
uint16x8x2_t ret;
@@ -16365,7 +19014,8 @@ vld2q_dup_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_u32 (const uint32_t * __a)
{
uint32x4x2_t ret;
@@ -16376,7 +19026,8 @@ vld2q_dup_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline uint64x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_u64 (const uint64_t * __a)
{
uint64x2x2_t ret;
@@ -16387,7 +19038,8 @@ vld2q_dup_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_f16 (const float16_t * __a)
{
float16x8x2_t ret;
@@ -16398,7 +19050,8 @@ vld2q_dup_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_f32 (const float32_t * __a)
{
float32x4x2_t ret;
@@ -16409,7 +19062,8 @@ vld2q_dup_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline float64x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld2q_dup_f64 (const float64_t * __a)
{
float64x2x2_t ret;
@@ -16420,7 +19074,20 @@ vld2q_dup_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int64x1x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld2q_dup_p64 (const poly64_t * __a)
+{
+ poly64x2x2_t ret;
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 0);
+ ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 1);
+ return ret;
+}
+
+__extension__ extern __inline int64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s64 (const int64_t * __a)
{
int64x1x3_t ret;
@@ -16432,7 +19099,8 @@ vld3_dup_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint64x1x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u64 (const uint64_t * __a)
{
uint64x1x3_t ret;
@@ -16444,7 +19112,8 @@ vld3_dup_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float64x1x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_f64 (const float64_t * __a)
{
float64x1x3_t ret;
@@ -16456,7 +19125,8 @@ vld3_dup_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int8x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s8 (const int8_t * __a)
{
int8x8x3_t ret;
@@ -16468,7 +19138,8 @@ vld3_dup_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_p8 (const poly8_t * __a)
{
poly8x8x3_t ret;
@@ -16480,7 +19151,8 @@ vld3_dup_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s16 (const int16_t * __a)
{
int16x4x3_t ret;
@@ -16492,7 +19164,8 @@ vld3_dup_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_p16 (const poly16_t * __a)
{
poly16x4x3_t ret;
@@ -16504,7 +19177,8 @@ vld3_dup_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s32 (const int32_t * __a)
{
int32x2x3_t ret;
@@ -16516,7 +19190,8 @@ vld3_dup_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline uint8x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u8 (const uint8_t * __a)
{
uint8x8x3_t ret;
@@ -16528,7 +19203,8 @@ vld3_dup_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u16 (const uint16_t * __a)
{
uint16x4x3_t ret;
@@ -16540,7 +19216,8 @@ vld3_dup_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u32 (const uint32_t * __a)
{
uint32x2x3_t ret;
@@ -16552,7 +19229,8 @@ vld3_dup_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline float16x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_f16 (const float16_t * __a)
{
float16x4x3_t ret;
@@ -16564,7 +19242,8 @@ vld3_dup_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_f32 (const float32_t * __a)
{
float32x2x3_t ret;
@@ -16576,7 +19255,21 @@ vld3_dup_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline int8x16x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3_dup_p64 (const poly64_t * __a)
+{
+ poly64x1x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 0);
+ ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 1);
+ ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 2);
+ return ret;
+}
+
+__extension__ extern __inline int8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_s8 (const int8_t * __a)
{
int8x16x3_t ret;
@@ -16588,7 +19281,8 @@ vld3q_dup_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x16x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_p8 (const poly8_t * __a)
{
poly8x16x3_t ret;
@@ -16600,7 +19294,8 @@ vld3q_dup_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_s16 (const int16_t * __a)
{
int16x8x3_t ret;
@@ -16612,7 +19307,8 @@ vld3q_dup_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_p16 (const poly16_t * __a)
{
poly16x8x3_t ret;
@@ -16624,7 +19320,8 @@ vld3q_dup_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_s32 (const int32_t * __a)
{
int32x4x3_t ret;
@@ -16636,7 +19333,8 @@ vld3q_dup_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline int64x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_s64 (const int64_t * __a)
{
int64x2x3_t ret;
@@ -16648,7 +19346,8 @@ vld3q_dup_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint8x16x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_u8 (const uint8_t * __a)
{
uint8x16x3_t ret;
@@ -16660,7 +19359,8 @@ vld3q_dup_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_u16 (const uint16_t * __a)
{
uint16x8x3_t ret;
@@ -16672,7 +19372,8 @@ vld3q_dup_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_u32 (const uint32_t * __a)
{
uint32x4x3_t ret;
@@ -16684,7 +19385,8 @@ vld3q_dup_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline uint64x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_u64 (const uint64_t * __a)
{
uint64x2x3_t ret;
@@ -16696,7 +19398,8 @@ vld3q_dup_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float16x8x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_f16 (const float16_t * __a)
{
float16x8x3_t ret;
@@ -16708,7 +19411,8 @@ vld3q_dup_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x4x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_f32 (const float32_t * __a)
{
float32x4x3_t ret;
@@ -16720,7 +19424,8 @@ vld3q_dup_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline float64x2x3_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld3q_dup_f64 (const float64_t * __a)
{
float64x2x3_t ret;
@@ -16732,7 +19437,21 @@ vld3q_dup_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int64x1x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2x3_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld3q_dup_p64 (const poly64_t * __a)
+{
+ poly64x2x3_t ret;
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 0);
+ ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 1);
+ ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 2);
+ return ret;
+}
+
+__extension__ extern __inline int64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s64 (const int64_t * __a)
{
int64x1x4_t ret;
@@ -16745,7 +19464,8 @@ vld4_dup_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint64x1x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u64 (const uint64_t * __a)
{
uint64x1x4_t ret;
@@ -16758,7 +19478,8 @@ vld4_dup_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float64x1x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_f64 (const float64_t * __a)
{
float64x1x4_t ret;
@@ -16771,7 +19492,8 @@ vld4_dup_f64 (const float64_t * __a)
return ret;
}
-__extension__ static __inline int8x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s8 (const int8_t * __a)
{
int8x8x4_t ret;
@@ -16784,7 +19506,8 @@ vld4_dup_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_p8 (const poly8_t * __a)
{
poly8x8x4_t ret;
@@ -16797,7 +19520,8 @@ vld4_dup_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s16 (const int16_t * __a)
{
int16x4x4_t ret;
@@ -16810,7 +19534,8 @@ vld4_dup_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_p16 (const poly16_t * __a)
{
poly16x4x4_t ret;
@@ -16823,7 +19548,8 @@ vld4_dup_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s32 (const int32_t * __a)
{
int32x2x4_t ret;
@@ -16836,7 +19562,8 @@ vld4_dup_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline uint8x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u8 (const uint8_t * __a)
{
uint8x8x4_t ret;
@@ -16849,7 +19576,8 @@ vld4_dup_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u16 (const uint16_t * __a)
{
uint16x4x4_t ret;
@@ -16862,7 +19590,8 @@ vld4_dup_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u32 (const uint32_t * __a)
{
uint32x2x4_t ret;
@@ -16875,7 +19604,8 @@ vld4_dup_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline float16x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_f16 (const float16_t * __a)
{
float16x4x4_t ret;
@@ -16888,7 +19618,8 @@ vld4_dup_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_f32 (const float32_t * __a)
{
float32x2x4_t ret;
@@ -16901,7 +19632,22 @@ vld4_dup_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline int8x16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4_dup_p64 (const poly64_t * __a)
+{
+ poly64x1x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 0);
+ ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 1);
+ ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 2);
+ ret.val[3] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 3);
+ return ret;
+}
+
+__extension__ extern __inline int8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_s8 (const int8_t * __a)
{
int8x16x4_t ret;
@@ -16914,7 +19660,8 @@ vld4q_dup_s8 (const int8_t * __a)
return ret;
}
-__extension__ static __inline poly8x16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_p8 (const poly8_t * __a)
{
poly8x16x4_t ret;
@@ -16927,7 +19674,8 @@ vld4q_dup_p8 (const poly8_t * __a)
return ret;
}
-__extension__ static __inline int16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_s16 (const int16_t * __a)
{
int16x8x4_t ret;
@@ -16940,7 +19688,8 @@ vld4q_dup_s16 (const int16_t * __a)
return ret;
}
-__extension__ static __inline poly16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_p16 (const poly16_t * __a)
{
poly16x8x4_t ret;
@@ -16953,7 +19702,8 @@ vld4q_dup_p16 (const poly16_t * __a)
return ret;
}
-__extension__ static __inline int32x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_s32 (const int32_t * __a)
{
int32x4x4_t ret;
@@ -16966,7 +19716,8 @@ vld4q_dup_s32 (const int32_t * __a)
return ret;
}
-__extension__ static __inline int64x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_s64 (const int64_t * __a)
{
int64x2x4_t ret;
@@ -16979,7 +19730,8 @@ vld4q_dup_s64 (const int64_t * __a)
return ret;
}
-__extension__ static __inline uint8x16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_u8 (const uint8_t * __a)
{
uint8x16x4_t ret;
@@ -16992,7 +19744,8 @@ vld4q_dup_u8 (const uint8_t * __a)
return ret;
}
-__extension__ static __inline uint16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_u16 (const uint16_t * __a)
{
uint16x8x4_t ret;
@@ -17005,7 +19758,8 @@ vld4q_dup_u16 (const uint16_t * __a)
return ret;
}
-__extension__ static __inline uint32x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_u32 (const uint32_t * __a)
{
uint32x4x4_t ret;
@@ -17018,7 +19772,8 @@ vld4q_dup_u32 (const uint32_t * __a)
return ret;
}
-__extension__ static __inline uint64x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_u64 (const uint64_t * __a)
{
uint64x2x4_t ret;
@@ -17031,7 +19786,8 @@ vld4q_dup_u64 (const uint64_t * __a)
return ret;
}
-__extension__ static __inline float16x8x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_f16 (const float16_t * __a)
{
float16x8x4_t ret;
@@ -17044,7 +19800,8 @@ vld4q_dup_f16 (const float16_t * __a)
return ret;
}
-__extension__ static __inline float32x4x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_f32 (const float32_t * __a)
{
float32x4x4_t ret;
@@ -17057,7 +19814,8 @@ vld4q_dup_f32 (const float32_t * __a)
return ret;
}
-__extension__ static __inline float64x2x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vld4q_dup_f64 (const float64_t * __a)
{
float64x2x4_t ret;
@@ -17070,11 +19828,26 @@ vld4q_dup_f64 (const float64_t * __a)
return ret;
}
+__extension__ extern __inline poly64x2x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vld4q_dup_p64 (const poly64_t * __a)
+{
+ poly64x2x4_t ret;
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
+ ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 0);
+ ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 1);
+ ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 2);
+ ret.val[3] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 3);
+ return ret;
+}
+
/* vld2_lane */
#define __LD2_LANE_FUNC(intype, vectype, largetype, ptrtype, mode, \
qmode, ptrmode, funcsuffix, signedtype) \
-__extension__ static __inline intype __attribute__ ((__always_inline__)) \
+__extension__ extern __inline intype \
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
vld2_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
{ \
__builtin_aarch64_simd_oi __o; \
@@ -17106,6 +19879,8 @@ __LD2_LANE_FUNC (poly8x8x2_t, poly8x8_t, poly8x16x2_t, poly8_t, v8qi, v16qi, qi,
int8x16_t)
__LD2_LANE_FUNC (poly16x4x2_t, poly16x4_t, poly16x8x2_t, poly16_t, v4hi, v8hi, hi,
p16, int16x8_t)
+__LD2_LANE_FUNC (poly64x1x2_t, poly64x1_t, poly64x2x2_t, poly64_t, di,
+ v2di_ssps, di, p64, poly64x2_t)
__LD2_LANE_FUNC (int8x8x2_t, int8x8_t, int8x16x2_t, int8_t, v8qi, v16qi, qi, s8,
int8x16_t)
__LD2_LANE_FUNC (int16x4x2_t, int16x4_t, int16x8x2_t, int16_t, v4hi, v8hi, hi, s16,
@@ -17128,7 +19903,8 @@ __LD2_LANE_FUNC (uint64x1x2_t, uint64x1_t, uint64x2x2_t, uint64_t, di, v2di, di,
/* vld2q_lane */
#define __LD2_LANE_FUNC(intype, vtype, ptrtype, mode, ptrmode, funcsuffix) \
-__extension__ static __inline intype __attribute__ ((__always_inline__)) \
+__extension__ extern __inline intype \
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
vld2q_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
{ \
__builtin_aarch64_simd_oi __o; \
@@ -17147,6 +19923,7 @@ __LD2_LANE_FUNC (float32x4x2_t, float32x4_t, float32_t, v4sf, sf, f32)
__LD2_LANE_FUNC (float64x2x2_t, float64x2_t, float64_t, v2df, df, f64)
__LD2_LANE_FUNC (poly8x16x2_t, poly8x16_t, poly8_t, v16qi, qi, p8)
__LD2_LANE_FUNC (poly16x8x2_t, poly16x8_t, poly16_t, v8hi, hi, p16)
+__LD2_LANE_FUNC (poly64x2x2_t, poly64x2_t, poly64_t, v2di, di, p64)
__LD2_LANE_FUNC (int8x16x2_t, int8x16_t, int8_t, v16qi, qi, s8)
__LD2_LANE_FUNC (int16x8x2_t, int16x8_t, int16_t, v8hi, hi, s16)
__LD2_LANE_FUNC (int32x4x2_t, int32x4_t, int32_t, v4si, si, s32)
@@ -17162,7 +19939,8 @@ __LD2_LANE_FUNC (uint64x2x2_t, uint64x2_t, uint64_t, v2di, di, u64)
#define __LD3_LANE_FUNC(intype, vectype, largetype, ptrtype, mode, \
qmode, ptrmode, funcsuffix, signedtype) \
-__extension__ static __inline intype __attribute__ ((__always_inline__)) \
+__extension__ extern __inline intype \
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
vld3_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
{ \
__builtin_aarch64_simd_ci __o; \
@@ -17200,6 +19978,8 @@ __LD3_LANE_FUNC (poly8x8x3_t, poly8x8_t, poly8x16x3_t, poly8_t, v8qi, v16qi, qi,
int8x16_t)
__LD3_LANE_FUNC (poly16x4x3_t, poly16x4_t, poly16x8x3_t, poly16_t, v4hi, v8hi, hi,
p16, int16x8_t)
+__LD3_LANE_FUNC (poly64x1x3_t, poly64x1_t, poly64x2x3_t, poly64_t, di,
+ v2di_ssps, di, p64, poly64x2_t)
__LD3_LANE_FUNC (int8x8x3_t, int8x8_t, int8x16x3_t, int8_t, v8qi, v16qi, qi, s8,
int8x16_t)
__LD3_LANE_FUNC (int16x4x3_t, int16x4_t, int16x8x3_t, int16_t, v4hi, v8hi, hi, s16,
@@ -17222,7 +20002,8 @@ __LD3_LANE_FUNC (uint64x1x3_t, uint64x1_t, uint64x2x3_t, uint64_t, di, v2di, di,
/* vld3q_lane */
#define __LD3_LANE_FUNC(intype, vtype, ptrtype, mode, ptrmode, funcsuffix) \
-__extension__ static __inline intype __attribute__ ((__always_inline__)) \
+__extension__ extern __inline intype \
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
vld3q_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
{ \
__builtin_aarch64_simd_ci __o; \
@@ -17243,6 +20024,7 @@ __LD3_LANE_FUNC (float32x4x3_t, float32x4_t, float32_t, v4sf, sf, f32)
__LD3_LANE_FUNC (float64x2x3_t, float64x2_t, float64_t, v2df, df, f64)
__LD3_LANE_FUNC (poly8x16x3_t, poly8x16_t, poly8_t, v16qi, qi, p8)
__LD3_LANE_FUNC (poly16x8x3_t, poly16x8_t, poly16_t, v8hi, hi, p16)
+__LD3_LANE_FUNC (poly64x2x3_t, poly64x2_t, poly64_t, v2di, di, p64)
__LD3_LANE_FUNC (int8x16x3_t, int8x16_t, int8_t, v16qi, qi, s8)
__LD3_LANE_FUNC (int16x8x3_t, int16x8_t, int16_t, v8hi, hi, s16)
__LD3_LANE_FUNC (int32x4x3_t, int32x4_t, int32_t, v4si, si, s32)
@@ -17258,7 +20040,8 @@ __LD3_LANE_FUNC (uint64x2x3_t, uint64x2_t, uint64_t, v2di, di, u64)
#define __LD4_LANE_FUNC(intype, vectype, largetype, ptrtype, mode, \
qmode, ptrmode, funcsuffix, signedtype) \
-__extension__ static __inline intype __attribute__ ((__always_inline__)) \
+__extension__ extern __inline intype \
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
vld4_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
{ \
__builtin_aarch64_simd_xi __o; \
@@ -17304,6 +20087,8 @@ __LD4_LANE_FUNC (poly8x8x4_t, poly8x8_t, poly8x16x4_t, poly8_t, v8qi, v16qi, qi,
int8x16_t)
__LD4_LANE_FUNC (poly16x4x4_t, poly16x4_t, poly16x8x4_t, poly16_t, v4hi, v8hi, hi,
p16, int16x8_t)
+__LD4_LANE_FUNC (poly64x1x4_t, poly64x1_t, poly64x2x4_t, poly64_t, di,
+ v2di_ssps, di, p64, poly64x2_t)
__LD4_LANE_FUNC (int8x8x4_t, int8x8_t, int8x16x4_t, int8_t, v8qi, v16qi, qi, s8,
int8x16_t)
__LD4_LANE_FUNC (int16x4x4_t, int16x4_t, int16x8x4_t, int16_t, v4hi, v8hi, hi, s16,
@@ -17326,7 +20111,8 @@ __LD4_LANE_FUNC (uint64x1x4_t, uint64x1_t, uint64x2x4_t, uint64_t, di, v2di, di,
/* vld4q_lane */
#define __LD4_LANE_FUNC(intype, vtype, ptrtype, mode, ptrmode, funcsuffix) \
-__extension__ static __inline intype __attribute__ ((__always_inline__)) \
+__extension__ extern __inline intype \
+__attribute__ ((__always_inline__, __gnu_inline__,__artificial__)) \
vld4q_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
{ \
__builtin_aarch64_simd_xi __o; \
@@ -17349,6 +20135,7 @@ __LD4_LANE_FUNC (float32x4x4_t, float32x4_t, float32_t, v4sf, sf, f32)
__LD4_LANE_FUNC (float64x2x4_t, float64x2_t, float64_t, v2df, df, f64)
__LD4_LANE_FUNC (poly8x16x4_t, poly8x16_t, poly8_t, v16qi, qi, p8)
__LD4_LANE_FUNC (poly16x8x4_t, poly16x8_t, poly16_t, v8hi, hi, p16)
+__LD4_LANE_FUNC (poly64x2x4_t, poly64x2_t, poly64_t, v2di, di, p64)
__LD4_LANE_FUNC (int8x16x4_t, int8x16_t, int8_t, v16qi, qi, s8)
__LD4_LANE_FUNC (int16x8x4_t, int16x8_t, int16_t, v8hi, hi, s16)
__LD4_LANE_FUNC (int32x4x4_t, int32x4_t, int32_t, v4si, si, s32)
@@ -17362,96 +20149,120 @@ __LD4_LANE_FUNC (uint64x2x4_t, uint64x2_t, uint64_t, v2di, di, u64)
/* vmax */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmax_f32 (float32x2_t __a, float32x2_t __b)
{
return __builtin_aarch64_smax_nanv2sf (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_smax_nandf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmax_s8 (int8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_smaxv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmax_s16 (int16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_smaxv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmax_s32 (int32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_smaxv2si (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmax_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_umaxv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmax_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_umaxv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmax_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_umaxv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_f32 (float32x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_smax_nanv4sf (__a, __b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_f64 (float64x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_smax_nanv2df (__a, __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_s8 (int8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_smaxv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_s16 (int16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_smaxv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_s32 (int32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_smaxv4si (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t) __builtin_aarch64_umaxv16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_umaxv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_umaxv4si ((int32x4_t) __a,
@@ -17459,109 +20270,127 @@ vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
}
/* vmulx */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulx_f32 (float32x2_t __a, float32x2_t __b)
{
return __builtin_aarch64_fmulxv2sf (__a, __b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxq_f32 (float32x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_fmulxv4sf (__a, __b);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulx_f64 (float64x1_t __a, float64x1_t __b)
{
return (float64x1_t) {__builtin_aarch64_fmulxdf (__a[0], __b[0])};
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxq_f64 (float64x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_fmulxv2df (__a, __b);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxs_f32 (float32_t __a, float32_t __b)
{
return __builtin_aarch64_fmulxsf (__a, __b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxd_f64 (float64_t __a, float64_t __b)
{
return __builtin_aarch64_fmulxdf (__a, __b);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulx_lane_f32 (float32x2_t __a, float32x2_t __v, const int __lane)
{
return vmulx_f32 (__a, __aarch64_vdup_lane_f32 (__v, __lane));
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulx_lane_f64 (float64x1_t __a, float64x1_t __v, const int __lane)
{
return vmulx_f64 (__a, __aarch64_vdup_lane_f64 (__v, __lane));
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxq_lane_f32 (float32x4_t __a, float32x2_t __v, const int __lane)
{
return vmulxq_f32 (__a, __aarch64_vdupq_lane_f32 (__v, __lane));
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxq_lane_f64 (float64x2_t __a, float64x1_t __v, const int __lane)
{
return vmulxq_f64 (__a, __aarch64_vdupq_lane_f64 (__v, __lane));
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulx_laneq_f32 (float32x2_t __a, float32x4_t __v, const int __lane)
{
return vmulx_f32 (__a, __aarch64_vdup_laneq_f32 (__v, __lane));
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulx_laneq_f64 (float64x1_t __a, float64x2_t __v, const int __lane)
{
return vmulx_f64 (__a, __aarch64_vdup_laneq_f64 (__v, __lane));
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxq_laneq_f32 (float32x4_t __a, float32x4_t __v, const int __lane)
{
return vmulxq_f32 (__a, __aarch64_vdupq_laneq_f32 (__v, __lane));
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxq_laneq_f64 (float64x2_t __a, float64x2_t __v, const int __lane)
{
return vmulxq_f64 (__a, __aarch64_vdupq_laneq_f64 (__v, __lane));
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxs_lane_f32 (float32_t __a, float32x2_t __v, const int __lane)
{
return vmulxs_f32 (__a, __aarch64_vget_lane_any (__v, __lane));
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxs_laneq_f32 (float32_t __a, float32x4_t __v, const int __lane)
{
return vmulxs_f32 (__a, __aarch64_vget_lane_any (__v, __lane));
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxd_lane_f64 (float64_t __a, float64x1_t __v, const int __lane)
{
return vmulxd_f64 (__a, __aarch64_vget_lane_any (__v, __lane));
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulxd_laneq_f64 (float64_t __a, float64x2_t __v, const int __lane)
{
return vmulxd_f64 (__a, __aarch64_vget_lane_any (__v, __lane));
@@ -17569,109 +20398,126 @@ vmulxd_laneq_f64 (float64_t __a, float64x2_t __v, const int __lane)
/* vpmax */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmax_s8 (int8x8_t a, int8x8_t b)
{
return __builtin_aarch64_smaxpv8qi (a, b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmax_s16 (int16x4_t a, int16x4_t b)
{
return __builtin_aarch64_smaxpv4hi (a, b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmax_s32 (int32x2_t a, int32x2_t b)
{
return __builtin_aarch64_smaxpv2si (a, b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmax_u8 (uint8x8_t a, uint8x8_t b)
{
return (uint8x8_t) __builtin_aarch64_umaxpv8qi ((int8x8_t) a,
(int8x8_t) b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmax_u16 (uint16x4_t a, uint16x4_t b)
{
return (uint16x4_t) __builtin_aarch64_umaxpv4hi ((int16x4_t) a,
(int16x4_t) b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmax_u32 (uint32x2_t a, uint32x2_t b)
{
return (uint32x2_t) __builtin_aarch64_umaxpv2si ((int32x2_t) a,
(int32x2_t) b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_s8 (int8x16_t a, int8x16_t b)
{
return __builtin_aarch64_smaxpv16qi (a, b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_s16 (int16x8_t a, int16x8_t b)
{
return __builtin_aarch64_smaxpv8hi (a, b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_s32 (int32x4_t a, int32x4_t b)
{
return __builtin_aarch64_smaxpv4si (a, b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
{
return (uint8x16_t) __builtin_aarch64_umaxpv16qi ((int8x16_t) a,
(int8x16_t) b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
{
return (uint16x8_t) __builtin_aarch64_umaxpv8hi ((int16x8_t) a,
(int16x8_t) b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
{
return (uint32x4_t) __builtin_aarch64_umaxpv4si ((int32x4_t) a,
(int32x4_t) b);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmax_f32 (float32x2_t a, float32x2_t b)
{
return __builtin_aarch64_smax_nanpv2sf (a, b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_f32 (float32x4_t a, float32x4_t b)
{
return __builtin_aarch64_smax_nanpv4sf (a, b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxq_f64 (float64x2_t a, float64x2_t b)
{
return __builtin_aarch64_smax_nanpv2df (a, b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxqd_f64 (float64x2_t a)
{
return __builtin_aarch64_reduc_smax_nan_scal_v2df (a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxs_f32 (float32x2_t a)
{
return __builtin_aarch64_reduc_smax_nan_scal_v2sf (a);
@@ -17679,31 +20525,36 @@ vpmaxs_f32 (float32x2_t a)
/* vpmaxnm */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxnm_f32 (float32x2_t a, float32x2_t b)
{
return __builtin_aarch64_smaxpv2sf (a, b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
{
return __builtin_aarch64_smaxpv4sf (a, b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
{
return __builtin_aarch64_smaxpv2df (a, b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxnmqd_f64 (float64x2_t a)
{
return __builtin_aarch64_reduc_smax_scal_v2df (a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmaxnms_f32 (float32x2_t a)
{
return __builtin_aarch64_reduc_smax_scal_v2sf (a);
@@ -17711,109 +20562,126 @@ vpmaxnms_f32 (float32x2_t a)
/* vpmin */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmin_s8 (int8x8_t a, int8x8_t b)
{
return __builtin_aarch64_sminpv8qi (a, b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmin_s16 (int16x4_t a, int16x4_t b)
{
return __builtin_aarch64_sminpv4hi (a, b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmin_s32 (int32x2_t a, int32x2_t b)
{
return __builtin_aarch64_sminpv2si (a, b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmin_u8 (uint8x8_t a, uint8x8_t b)
{
return (uint8x8_t) __builtin_aarch64_uminpv8qi ((int8x8_t) a,
(int8x8_t) b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmin_u16 (uint16x4_t a, uint16x4_t b)
{
return (uint16x4_t) __builtin_aarch64_uminpv4hi ((int16x4_t) a,
(int16x4_t) b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmin_u32 (uint32x2_t a, uint32x2_t b)
{
return (uint32x2_t) __builtin_aarch64_uminpv2si ((int32x2_t) a,
(int32x2_t) b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_s8 (int8x16_t a, int8x16_t b)
{
return __builtin_aarch64_sminpv16qi (a, b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_s16 (int16x8_t a, int16x8_t b)
{
return __builtin_aarch64_sminpv8hi (a, b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_s32 (int32x4_t a, int32x4_t b)
{
return __builtin_aarch64_sminpv4si (a, b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_u8 (uint8x16_t a, uint8x16_t b)
{
return (uint8x16_t) __builtin_aarch64_uminpv16qi ((int8x16_t) a,
(int8x16_t) b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_u16 (uint16x8_t a, uint16x8_t b)
{
return (uint16x8_t) __builtin_aarch64_uminpv8hi ((int16x8_t) a,
(int16x8_t) b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_u32 (uint32x4_t a, uint32x4_t b)
{
return (uint32x4_t) __builtin_aarch64_uminpv4si ((int32x4_t) a,
(int32x4_t) b);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmin_f32 (float32x2_t a, float32x2_t b)
{
return __builtin_aarch64_smin_nanpv2sf (a, b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_f32 (float32x4_t a, float32x4_t b)
{
return __builtin_aarch64_smin_nanpv4sf (a, b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminq_f64 (float64x2_t a, float64x2_t b)
{
return __builtin_aarch64_smin_nanpv2df (a, b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminqd_f64 (float64x2_t a)
{
return __builtin_aarch64_reduc_smin_nan_scal_v2df (a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpmins_f32 (float32x2_t a)
{
return __builtin_aarch64_reduc_smin_nan_scal_v2sf (a);
@@ -17821,31 +20689,36 @@ vpmins_f32 (float32x2_t a)
/* vpminnm */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminnm_f32 (float32x2_t a, float32x2_t b)
{
return __builtin_aarch64_sminpv2sf (a, b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminnmq_f32 (float32x4_t a, float32x4_t b)
{
return __builtin_aarch64_sminpv4sf (a, b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminnmq_f64 (float64x2_t a, float64x2_t b)
{
return __builtin_aarch64_sminpv2df (a, b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminnmqd_f64 (float64x2_t a)
{
return __builtin_aarch64_reduc_smin_scal_v2df (a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpminnms_f32 (float32x2_t a)
{
return __builtin_aarch64_reduc_smin_scal_v2sf (a);
@@ -17853,19 +20726,31 @@ vpminnms_f32 (float32x2_t a)
/* vmaxnm */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxnm_f32 (float32x2_t __a, float32x2_t __b)
{
return __builtin_aarch64_fmaxv2sf (__a, __b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnm_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_fmaxdf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_fmaxv4sf (__a, __b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxnmq_f64 (float64x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_fmaxv2df (__a, __b);
@@ -17873,91 +20758,106 @@ vmaxnmq_f64 (float64x2_t __a, float64x2_t __b)
/* vmaxv */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxv_f32 (float32x2_t __a)
{
return __builtin_aarch64_reduc_smax_nan_scal_v2sf (__a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxv_s8 (int8x8_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v8qi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxv_s16 (int16x4_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v4hi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxv_s32 (int32x2_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v2si (__a);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxv_u8 (uint8x8_t __a)
{
return __builtin_aarch64_reduc_umax_scal_v8qi_uu (__a);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxv_u16 (uint16x4_t __a)
{
return __builtin_aarch64_reduc_umax_scal_v4hi_uu (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxv_u32 (uint32x2_t __a)
{
return __builtin_aarch64_reduc_umax_scal_v2si_uu (__a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_f32 (float32x4_t __a)
{
return __builtin_aarch64_reduc_smax_nan_scal_v4sf (__a);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_f64 (float64x2_t __a)
{
return __builtin_aarch64_reduc_smax_nan_scal_v2df (__a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_s8 (int8x16_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v16qi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_s16 (int16x8_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v8hi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_s32 (int32x4_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v4si (__a);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_u8 (uint8x16_t __a)
{
return __builtin_aarch64_reduc_umax_scal_v16qi_uu (__a);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_u16 (uint16x8_t __a)
{
return __builtin_aarch64_reduc_umax_scal_v8hi_uu (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxvq_u32 (uint32x4_t __a)
{
return __builtin_aarch64_reduc_umax_scal_v4si_uu (__a);
@@ -17965,19 +20865,22 @@ vmaxvq_u32 (uint32x4_t __a)
/* vmaxnmv */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxnmv_f32 (float32x2_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v2sf (__a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxnmvq_f32 (float32x4_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v4sf (__a);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmaxnmvq_f64 (float64x2_t __a)
{
return __builtin_aarch64_reduc_smax_scal_v2df (__a);
@@ -17985,96 +20888,120 @@ vmaxnmvq_f64 (float64x2_t __a)
/* vmin */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmin_f32 (float32x2_t __a, float32x2_t __b)
{
return __builtin_aarch64_smin_nanv2sf (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_smin_nandf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmin_s8 (int8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_sminv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmin_s16 (int16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_sminv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmin_s32 (int32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_sminv2si (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmin_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_uminv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmin_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_uminv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmin_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_uminv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_f32 (float32x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_smin_nanv4sf (__a, __b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_f64 (float64x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_smin_nanv2df (__a, __b);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_s8 (int8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_sminv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_s16 (int16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_sminv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_s32 (int32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_sminv4si (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t) __builtin_aarch64_uminv16qi ((int8x16_t) __a,
(int8x16_t) __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t) __builtin_aarch64_uminv8hi ((int16x8_t) __a,
(int16x8_t) __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t) __builtin_aarch64_uminv4si ((int32x4_t) __a,
@@ -18083,19 +21010,31 @@ vminq_u32 (uint32x4_t __a, uint32x4_t __b)
/* vminnm */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminnm_f32 (float32x2_t __a, float32x2_t __b)
{
return __builtin_aarch64_fminv2sf (__a, __b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnm_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t)
+ { __builtin_aarch64_fmindf (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminnmq_f32 (float32x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_fminv4sf (__a, __b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminnmq_f64 (float64x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_fminv2df (__a, __b);
@@ -18103,91 +21042,106 @@ vminnmq_f64 (float64x2_t __a, float64x2_t __b)
/* vminv */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminv_f32 (float32x2_t __a)
{
return __builtin_aarch64_reduc_smin_nan_scal_v2sf (__a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminv_s8 (int8x8_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v8qi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminv_s16 (int16x4_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v4hi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminv_s32 (int32x2_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v2si (__a);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminv_u8 (uint8x8_t __a)
{
return __builtin_aarch64_reduc_umin_scal_v8qi_uu (__a);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminv_u16 (uint16x4_t __a)
{
return __builtin_aarch64_reduc_umin_scal_v4hi_uu (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminv_u32 (uint32x2_t __a)
{
return __builtin_aarch64_reduc_umin_scal_v2si_uu (__a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_f32 (float32x4_t __a)
{
return __builtin_aarch64_reduc_smin_nan_scal_v4sf (__a);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_f64 (float64x2_t __a)
{
return __builtin_aarch64_reduc_smin_nan_scal_v2df (__a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_s8 (int8x16_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v16qi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_s16 (int16x8_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v8hi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_s32 (int32x4_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v4si (__a);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_u8 (uint8x16_t __a)
{
return __builtin_aarch64_reduc_umin_scal_v16qi_uu (__a);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_u16 (uint16x8_t __a)
{
return __builtin_aarch64_reduc_umin_scal_v8hi_uu (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminvq_u32 (uint32x4_t __a)
{
return __builtin_aarch64_reduc_umin_scal_v4si_uu (__a);
@@ -18195,19 +21149,22 @@ vminvq_u32 (uint32x4_t __a)
/* vminnmv */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminnmv_f32 (float32x2_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v2sf (__a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminnmvq_f32 (float32x4_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v4sf (__a);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vminnmvq_f64 (float64x2_t __a)
{
return __builtin_aarch64_reduc_smin_scal_v2df (__a);
@@ -18215,25 +21172,29 @@ vminnmvq_f64 (float64x2_t __a)
/* vmla */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
{
return a + b * c;
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
{
return __a + __b * __c;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
{
return a + b * c;
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
{
return a + b * c;
@@ -18241,35 +21202,40 @@ vmlaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
/* vmla_lane */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_s16 (int16x4_t __a, int16x4_t __b,
int16x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_s32 (int32x2_t __a, int32x2_t __b,
int32x2_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x2_t __c, const int __lane)
{
@@ -18278,35 +21244,40 @@ vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b,
/* vmla_laneq */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_s16 (int16x4_t __a, int16x4_t __b,
int16x8_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_s32 (int32x2_t __a, int32x2_t __b,
int32x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x8_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x4_t __c, const int __lane)
{
@@ -18315,35 +21286,40 @@ vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
/* vmlaq_lane */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b,
int16x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b,
int32x2_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x2_t __c, const int __lane)
{
@@ -18352,35 +21328,40 @@ vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
/* vmlaq_laneq */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_s16 (int16x8_t __a, int16x8_t __b,
int16x8_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_s32 (int32x4_t __a, int32x4_t __b,
int32x4_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x8_t __c, const int __lane)
{
return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x4_t __c, const int __lane)
{
@@ -18389,25 +21370,29 @@ vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
/* vmls */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
{
return a - b * c;
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
{
return __a - __b * __c;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
{
return a - b * c;
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
{
return a - b * c;
@@ -18415,35 +21400,40 @@ vmlsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
/* vmls_lane */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_s16 (int16x4_t __a, int16x4_t __b,
int16x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_s32 (int32x2_t __a, int32x2_t __b,
int32x2_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x2_t __c, const int __lane)
{
@@ -18452,35 +21442,40 @@ vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b,
/* vmls_laneq */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_s16 (int16x4_t __a, int16x4_t __b,
int16x8_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_s32 (int32x2_t __a, int32x2_t __b,
int32x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x8_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x4_t __c, const int __lane)
{
@@ -18489,35 +21484,40 @@ vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
/* vmlsq_lane */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b,
int16x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b,
int32x2_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x2_t __c, const int __lane)
{
@@ -18526,34 +21526,39 @@ vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
/* vmlsq_laneq */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_s16 (int16x8_t __a, int16x8_t __b,
int16x8_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_s32 (int32x4_t __a, int32x4_t __b,
int32x4_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x8_t __c, const int __lane)
{
return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x4_t __c, const int __lane)
{
@@ -18562,145 +21567,197 @@ vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
/* vmov_n_ */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_f16 (float16_t __a)
+{
+ return vdup_n_f16 (__a);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_f32 (float32_t __a)
{
return vdup_n_f32 (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_f64 (float64_t __a)
{
return (float64x1_t) {__a};
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_p8 (poly8_t __a)
{
return vdup_n_p8 (__a);
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_p16 (poly16_t __a)
{
return vdup_n_p16 (__a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmov_n_p64 (poly64_t __a)
+{
+ return vdup_n_p64 (__a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s8 (int8_t __a)
{
return vdup_n_s8 (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s16 (int16_t __a)
{
return vdup_n_s16 (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s32 (int32_t __a)
{
return vdup_n_s32 (__a);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s64 (int64_t __a)
{
return (int64x1_t) {__a};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u8 (uint8_t __a)
{
return vdup_n_u8 (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u16 (uint16_t __a)
{
return vdup_n_u16 (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u32 (uint32_t __a)
{
return vdup_n_u32 (__a);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u64 (uint64_t __a)
{
return (uint64x1_t) {__a};
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_f16 (float16_t __a)
+{
+ return vdupq_n_f16 (__a);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_f32 (float32_t __a)
{
return vdupq_n_f32 (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_f64 (float64_t __a)
{
return vdupq_n_f64 (__a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_p8 (poly8_t __a)
{
return vdupq_n_p8 (__a);
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_p16 (poly16_t __a)
{
return vdupq_n_p16 (__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmovq_n_p64 (poly64_t __a)
+{
+ return vdupq_n_p64 (__a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s8 (int8_t __a)
{
return vdupq_n_s8 (__a);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s16 (int16_t __a)
{
return vdupq_n_s16 (__a);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s32 (int32_t __a)
{
return vdupq_n_s32 (__a);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s64 (int64_t __a)
{
return vdupq_n_s64 (__a);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u8 (uint8_t __a)
{
return vdupq_n_u8 (__a);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u16 (uint16_t __a)
{
return vdupq_n_u16 (__a);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u32 (uint32_t __a)
{
return vdupq_n_u32 (__a);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u64 (uint64_t __a)
{
return vdupq_n_u64 (__a);
@@ -18708,37 +21765,43 @@ vmovq_n_u64 (uint64_t __a)
/* vmul_lane */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_f64 (float64x1_t __a, float64x1_t __b, const int __lane)
{
return __a * __b;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
@@ -18746,13 +21809,15 @@ vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane)
/* vmuld_lane */
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmuld_lane_f64 (float64_t __a, float64x1_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
@@ -18760,13 +21825,15 @@ vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane)
/* vmuls_lane */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmuls_lane_f32 (float32_t __a, float32x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
@@ -18774,37 +21841,43 @@ vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane)
/* vmul_laneq */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_laneq_f32 (float32x2_t __a, float32x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_laneq_f64 (float64x1_t __a, float64x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
@@ -18812,7 +21885,8 @@ vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane)
/* vmul_n */
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_n_f64 (float64x1_t __a, float64_t __b)
{
return (float64x1_t) { vget_lane_f64 (__a, 0) * __b };
@@ -18820,38 +21894,44 @@ vmul_n_f64 (float64x1_t __a, float64_t __b)
/* vmulq_lane */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_f64 (float64x2_t __a, float64x1_t __b, const int __lane)
{
__AARCH64_LANE_CHECK (__a, __lane);
return __a * __b[0];
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
@@ -18859,111 +21939,308 @@ vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane)
/* vmulq_laneq */
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_laneq_f32 (float32x4_t __a, float32x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_laneq_f64 (float64x2_t __a, float64x2_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane)
{
return __a * __aarch64_vget_lane_any (__b, __lane);
}
+/* vmul_n. */
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_f32 (float32x2_t __a, float32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_f64 (float64x2_t __a, float64_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_s16 (int16x4_t __a, int16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_s32 (int32x2_t __a, int32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+ return __a * __b;
+}
+
+/* vmvn */
+
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_p8 (poly8x8_t __a)
+{
+ return (poly8x8_t) ~((int8x8_t) __a);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_s8 (int8x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_s16 (int16x4_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_s32 (int32x2_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_u8 (uint8x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_u16 (uint16x4_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvn_u32 (uint32x2_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_p8 (poly8x16_t __a)
+{
+ return (poly8x16_t) ~((int8x16_t) __a);
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_s8 (int8x16_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_s16 (int16x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_s32 (int32x4_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_u8 (uint8x16_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_u16 (uint16x8_t __a)
+{
+ return ~__a;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmvnq_u32 (uint32x4_t __a)
+{
+ return ~__a;
+}
+
/* vneg */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vneg_f32 (float32x2_t __a)
{
return -__a;
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vneg_f64 (float64x1_t __a)
{
return -__a;
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vneg_s8 (int8x8_t __a)
{
return -__a;
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vneg_s16 (int16x4_t __a)
{
return -__a;
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vneg_s32 (int32x2_t __a)
{
return -__a;
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vneg_s64 (int64x1_t __a)
{
return -__a;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vnegq_f32 (float32x4_t __a)
{
return -__a;
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vnegq_f64 (float64x2_t __a)
{
return -__a;
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vnegq_s8 (int8x16_t __a)
{
return -__a;
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vnegq_s16 (int16x8_t __a)
{
return -__a;
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vnegq_s32 (int32x4_t __a)
{
return -__a;
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vnegq_s64 (int64x2_t __a)
{
return -__a;
@@ -18971,58 +22248,95 @@ vnegq_s64 (int64x2_t __a)
/* vpadd */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_faddpv2sf (__a, __b);
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_faddpv4sf (__a, __b);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_faddpv2df (__a, __b);
+}
+
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadd_s8 (int8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_addpv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadd_s16 (int16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_addpv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadd_s32 (int32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_addpv2si (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a,
(int8x8_t) __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a,
(int16x4_t) __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a,
(int32x2_t) __b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadds_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_reduc_plus_scal_v2sf (__a);
+}
+
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddd_f64 (float64x2_t __a)
{
return __builtin_aarch64_reduc_plus_scal_v2df (__a);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddd_s64 (int64x2_t __a)
{
return __builtin_aarch64_addpdi (__a);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddd_u64 (uint64x2_t __a)
{
return __builtin_aarch64_addpdi ((int64x2_t) __a);
@@ -19030,31 +22344,36 @@ vpaddd_u64 (uint64x2_t __a)
/* vqabs */
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s64 (int64x2_t __a)
{
return (int64x2_t) __builtin_aarch64_sqabsv2di (__a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsb_s8 (int8_t __a)
{
return (int8_t) __builtin_aarch64_sqabsqi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsh_s16 (int16_t __a)
{
return (int16_t) __builtin_aarch64_sqabshi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabss_s32 (int32_t __a)
{
return (int32_t) __builtin_aarch64_sqabssi (__a);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsd_s64 (int64_t __a)
{
return __builtin_aarch64_sqabsdi (__a);
@@ -19062,49 +22381,57 @@ vqabsd_s64 (int64_t __a)
/* vqadd */
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddb_s8 (int8_t __a, int8_t __b)
{
return (int8_t) __builtin_aarch64_sqaddqi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddh_s16 (int16_t __a, int16_t __b)
{
return (int16_t) __builtin_aarch64_sqaddhi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadds_s32 (int32_t __a, int32_t __b)
{
return (int32_t) __builtin_aarch64_sqaddsi (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddd_s64 (int64_t __a, int64_t __b)
{
return __builtin_aarch64_sqadddi (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddb_u8 (uint8_t __a, uint8_t __b)
{
return (uint8_t) __builtin_aarch64_uqaddqi_uuu (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddh_u16 (uint16_t __a, uint16_t __b)
{
return (uint16_t) __builtin_aarch64_uqaddhi_uuu (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadds_u32 (uint32_t __a, uint32_t __b)
{
return (uint32_t) __builtin_aarch64_uqaddsi_uuu (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddd_u64 (uint64_t __a, uint64_t __b)
{
return __builtin_aarch64_uqadddi_uuu (__a, __b);
@@ -19112,137 +22439,159 @@ vqaddd_u64 (uint64_t __a, uint64_t __b)
/* vqdmlal */
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
return __builtin_aarch64_sqdmlalv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
{
return __builtin_aarch64_sqdmlal2v8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x4_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlal2_lanev8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlal2_laneqv8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlal2_nv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
{
return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
{
return __builtin_aarch64_sqdmlal_laneqv4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlal_nv4hi (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return __builtin_aarch64_sqdmlalv2si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
{
return __builtin_aarch64_sqdmlal2v4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x2_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlal2_lanev4si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlal2_laneqv4si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlal2_nv4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
{
return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
{
return __builtin_aarch64_sqdmlal_laneqv2si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlalh_s16 (int32_t __a, int16_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlalhi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlalh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlalh_laneq_s16 (int32_t __a, int16_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqdmlal_laneqhi (__a, __b, __c, __d);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlals_s32 (int64_t __a, int32_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlalsi (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlals_lane_s32 (int64_t __a, int32_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlals_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqdmlal_laneqsi (__a, __b, __c, __d);
@@ -19250,137 +22599,159 @@ vqdmlals_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d)
/* vqdmlsl */
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
return __builtin_aarch64_sqdmlslv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
{
return __builtin_aarch64_sqdmlsl2v8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x4_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlsl2_lanev8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlsl2_laneqv8hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlsl2_nv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
{
return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
{
return __builtin_aarch64_sqdmlsl_laneqv4hi (__a, __b, __c, __d);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlsl_nv4hi (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return __builtin_aarch64_sqdmlslv2si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
{
return __builtin_aarch64_sqdmlsl2v4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x2_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlsl2_lanev4si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
int const __d)
{
return __builtin_aarch64_sqdmlsl2_laneqv4si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlsl2_nv4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
{
return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
{
return __builtin_aarch64_sqdmlsl_laneqv2si (__a, __b, __c, __d);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlslh_s16 (int32_t __a, int16_t __b, int16_t __c)
{
return __builtin_aarch64_sqdmlslhi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlslh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d)
{
return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlslh_laneq_s16 (int32_t __a, int16_t __b, int16x8_t __c, const int __d)
{
return __builtin_aarch64_sqdmlsl_laneqhi (__a, __b, __c, __d);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsls_s32 (int64_t __a, int32_t __b, int32_t __c)
{
return __builtin_aarch64_sqdmlslsi (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsls_lane_s32 (int64_t __a, int32_t __b, int32x2_t __c, const int __d)
{
return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsls_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d)
{
return __builtin_aarch64_sqdmlsl_laneqsi (__a, __b, __c, __d);
@@ -19388,61 +22759,71 @@ vqdmlsls_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d)
/* vqdmulh */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhh_s16 (int16_t __a, int16_t __b)
{
return (int16_t) __builtin_aarch64_sqdmulhhi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_laneqhi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhs_s32 (int32_t __a, int32_t __b)
{
return (int32_t) __builtin_aarch64_sqdmulhsi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmulh_laneqsi (__a, __b, __c);
@@ -19450,133 +22831,155 @@ vqdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
/* vqdmull */
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_s16 (int16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_sqdmullv4hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_s16 (int16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_sqdmull2v8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_lane_s16 (int16x8_t __a, int16x4_t __b, int const __c)
{
return __builtin_aarch64_sqdmull2_lanev8hi (__a, __b,__c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_laneq_s16 (int16x8_t __a, int16x8_t __b, int const __c)
{
return __builtin_aarch64_sqdmull2_laneqv8hi (__a, __b,__c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
{
return __builtin_aarch64_sqdmull2_nv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c)
{
return __builtin_aarch64_sqdmull_lanev4hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_laneq_s16 (int16x4_t __a, int16x8_t __b, int const __c)
{
return __builtin_aarch64_sqdmull_laneqv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_n_s16 (int16x4_t __a, int16_t __b)
{
return __builtin_aarch64_sqdmull_nv4hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_s32 (int32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_sqdmullv2si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_s32 (int32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_sqdmull2v4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_lane_s32 (int32x4_t __a, int32x2_t __b, int const __c)
{
return __builtin_aarch64_sqdmull2_lanev4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t __b, int const __c)
{
return __builtin_aarch64_sqdmull2_laneqv4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
{
return __builtin_aarch64_sqdmull2_nv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c)
{
return __builtin_aarch64_sqdmull_lanev2si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b, int const __c)
{
return __builtin_aarch64_sqdmull_laneqv2si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_n_s32 (int32x2_t __a, int32_t __b)
{
return __builtin_aarch64_sqdmull_nv2si (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmullh_s16 (int16_t __a, int16_t __b)
{
return (int32_t) __builtin_aarch64_sqdmullhi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmullh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmullh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_sqdmull_laneqhi (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulls_s32 (int32_t __a, int32_t __b)
{
return __builtin_aarch64_sqdmullsi (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulls_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulls_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_sqdmull_laneqsi (__a, __b, __c);
@@ -19584,73 +22987,85 @@ vqdmulls_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
/* vqmovn */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_s16 (int16x8_t __a)
{
return (int8x8_t) __builtin_aarch64_sqmovnv8hi (__a);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_s32 (int32x4_t __a)
{
return (int16x4_t) __builtin_aarch64_sqmovnv4si (__a);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_s64 (int64x2_t __a)
{
return (int32x2_t) __builtin_aarch64_sqmovnv2di (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_u16 (uint16x8_t __a)
{
return (uint8x8_t) __builtin_aarch64_uqmovnv8hi ((int16x8_t) __a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_u32 (uint32x4_t __a)
{
return (uint16x4_t) __builtin_aarch64_uqmovnv4si ((int32x4_t) __a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_u64 (uint64x2_t __a)
{
return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovnh_s16 (int16_t __a)
{
return (int8_t) __builtin_aarch64_sqmovnhi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovns_s32 (int32_t __a)
{
return (int16_t) __builtin_aarch64_sqmovnsi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovnd_s64 (int64_t __a)
{
return (int32_t) __builtin_aarch64_sqmovndi (__a);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovnh_u16 (uint16_t __a)
{
return (uint8_t) __builtin_aarch64_uqmovnhi (__a);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovns_u32 (uint32_t __a)
{
return (uint16_t) __builtin_aarch64_uqmovnsi (__a);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovnd_u64 (uint64_t __a)
{
return (uint32_t) __builtin_aarch64_uqmovndi (__a);
@@ -19658,37 +23073,43 @@ vqmovnd_u64 (uint64_t __a)
/* vqmovun */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_s16 (int16x8_t __a)
{
return (uint8x8_t) __builtin_aarch64_sqmovunv8hi (__a);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_s32 (int32x4_t __a)
{
return (uint16x4_t) __builtin_aarch64_sqmovunv4si (__a);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_s64 (int64x2_t __a)
{
return (uint32x2_t) __builtin_aarch64_sqmovunv2di (__a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovunh_s16 (int16_t __a)
{
return (int8_t) __builtin_aarch64_sqmovunhi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovuns_s32 (int32_t __a)
{
return (int16_t) __builtin_aarch64_sqmovunsi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovund_s64 (int64_t __a)
{
return (int32_t) __builtin_aarch64_sqmovundi (__a);
@@ -19696,31 +23117,36 @@ vqmovund_s64 (int64_t __a)
/* vqneg */
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s64 (int64x2_t __a)
{
return (int64x2_t) __builtin_aarch64_sqnegv2di (__a);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegb_s8 (int8_t __a)
{
return (int8_t) __builtin_aarch64_sqnegqi (__a);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegh_s16 (int16_t __a)
{
return (int16_t) __builtin_aarch64_sqneghi (__a);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegs_s32 (int32_t __a)
{
return (int32_t) __builtin_aarch64_sqnegsi (__a);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegd_s64 (int64_t __a)
{
return __builtin_aarch64_sqnegdi (__a);
@@ -19728,61 +23154,71 @@ vqnegd_s64 (int64_t __a)
/* vqrdmulh */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhh_s16 (int16_t __a, int16_t __b)
{
return (int16_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_laneqhi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhs_s32 (int32_t __a, int32_t __b)
{
return (int32_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
{
return __builtin_aarch64_sqrdmulh_laneqsi (__a, __b, __c);
@@ -19790,145 +23226,169 @@ vqrdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
/* vqrshl */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s8 (int8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_sqrshlv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s16 (int16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_sqrshlv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s32 (int32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_sqrshlv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s64 (int64x1_t __a, int64x1_t __b)
{
return (int64x1_t) {__builtin_aarch64_sqrshldi (__a[0], __b[0])};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_uqrshlv8qi_uus ( __a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_uqrshlv4hi_uus ( __a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_uqrshlv2si_uus ( __a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) {__builtin_aarch64_uqrshldi_uus (__a[0], __b[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_sqrshlv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_sqrshlv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_sqrshlv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_sqrshlv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_uqrshlv16qi_uus ( __a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_uqrshlv8hi_uus ( __a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_uqrshlv4si_uus ( __a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_uqrshlv2di_uus ( __a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlb_s8 (int8_t __a, int8_t __b)
{
return __builtin_aarch64_sqrshlqi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlh_s16 (int16_t __a, int16_t __b)
{
return __builtin_aarch64_sqrshlhi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshls_s32 (int32_t __a, int32_t __b)
{
return __builtin_aarch64_sqrshlsi (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshld_s64 (int64_t __a, int64_t __b)
{
return __builtin_aarch64_sqrshldi (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlb_u8 (uint8_t __a, uint8_t __b)
{
return __builtin_aarch64_uqrshlqi_uus (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshlh_u16 (uint16_t __a, uint16_t __b)
{
return __builtin_aarch64_uqrshlhi_uus (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshls_u32 (uint32_t __a, uint32_t __b)
{
return __builtin_aarch64_uqrshlsi_uus (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshld_u64 (uint64_t __a, uint64_t __b)
{
return __builtin_aarch64_uqrshldi_uus (__a, __b);
@@ -19936,73 +23396,85 @@ vqrshld_u64 (uint64_t __a, uint64_t __b)
/* vqrshrn */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_s16 (int16x8_t __a, const int __b)
{
return (int8x8_t) __builtin_aarch64_sqrshrn_nv8hi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_s32 (int32x4_t __a, const int __b)
{
return (int16x4_t) __builtin_aarch64_sqrshrn_nv4si (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_s64 (int64x2_t __a, const int __b)
{
return (int32x2_t) __builtin_aarch64_sqrshrn_nv2di (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_u16 (uint16x8_t __a, const int __b)
{
return __builtin_aarch64_uqrshrn_nv8hi_uus ( __a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_u32 (uint32x4_t __a, const int __b)
{
return __builtin_aarch64_uqrshrn_nv4si_uus ( __a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_u64 (uint64x2_t __a, const int __b)
{
return __builtin_aarch64_uqrshrn_nv2di_uus ( __a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrnh_n_s16 (int16_t __a, const int __b)
{
return (int8_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrns_n_s32 (int32_t __a, const int __b)
{
return (int16_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrnd_n_s64 (int64_t __a, const int __b)
{
return (int32_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrnh_n_u16 (uint16_t __a, const int __b)
{
return __builtin_aarch64_uqrshrn_nhi_uus (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrns_n_u32 (uint32_t __a, const int __b)
{
return __builtin_aarch64_uqrshrn_nsi_uus (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrnd_n_u64 (uint64_t __a, const int __b)
{
return __builtin_aarch64_uqrshrn_ndi_uus (__a, __b);
@@ -20010,37 +23482,43 @@ vqrshrnd_n_u64 (uint64_t __a, const int __b)
/* vqrshrun */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_n_s16 (int16x8_t __a, const int __b)
{
return (uint8x8_t) __builtin_aarch64_sqrshrun_nv8hi (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_n_s32 (int32x4_t __a, const int __b)
{
return (uint16x4_t) __builtin_aarch64_sqrshrun_nv4si (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_n_s64 (int64x2_t __a, const int __b)
{
return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrunh_n_s16 (int16_t __a, const int __b)
{
return (int8_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshruns_n_s32 (int32_t __a, const int __b)
{
return (int16_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrund_n_s64 (int64_t __a, const int __b)
{
return (int32_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
@@ -20048,289 +23526,337 @@ vqrshrund_n_s64 (int64_t __a, const int __b)
/* vqshl */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s8 (int8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_sqshlv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s16 (int16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_sqshlv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s32 (int32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_sqshlv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s64 (int64x1_t __a, int64x1_t __b)
{
return (int64x1_t) {__builtin_aarch64_sqshldi (__a[0], __b[0])};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u8 (uint8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_uqshlv8qi_uus ( __a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u16 (uint16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_uqshlv4hi_uus ( __a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u32 (uint32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_uqshlv2si_uus ( __a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u64 (uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) {__builtin_aarch64_uqshldi_uus (__a[0], __b[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s8 (int8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_sqshlv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s16 (int16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_sqshlv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s32 (int32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_sqshlv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s64 (int64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_sqshlv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_uqshlv16qi_uus ( __a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_uqshlv8hi_uus ( __a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_uqshlv4si_uus ( __a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_uqshlv2di_uus ( __a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlb_s8 (int8_t __a, int8_t __b)
{
return __builtin_aarch64_sqshlqi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlh_s16 (int16_t __a, int16_t __b)
{
return __builtin_aarch64_sqshlhi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshls_s32 (int32_t __a, int32_t __b)
{
return __builtin_aarch64_sqshlsi (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshld_s64 (int64_t __a, int64_t __b)
{
return __builtin_aarch64_sqshldi (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlb_u8 (uint8_t __a, uint8_t __b)
{
return __builtin_aarch64_uqshlqi_uus (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlh_u16 (uint16_t __a, uint16_t __b)
{
return __builtin_aarch64_uqshlhi_uus (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshls_u32 (uint32_t __a, uint32_t __b)
{
return __builtin_aarch64_uqshlsi_uus (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshld_u64 (uint64_t __a, uint64_t __b)
{
return __builtin_aarch64_uqshldi_uus (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s8 (int8x8_t __a, const int __b)
{
return (int8x8_t) __builtin_aarch64_sqshl_nv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s16 (int16x4_t __a, const int __b)
{
return (int16x4_t) __builtin_aarch64_sqshl_nv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s32 (int32x2_t __a, const int __b)
{
return (int32x2_t) __builtin_aarch64_sqshl_nv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s64 (int64x1_t __a, const int __b)
{
return (int64x1_t) {__builtin_aarch64_sqshl_ndi (__a[0], __b)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u8 (uint8x8_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nv8qi_uus (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u16 (uint16x4_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nv4hi_uus (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u32 (uint32x2_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nv2si_uus (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u64 (uint64x1_t __a, const int __b)
{
return (uint64x1_t) {__builtin_aarch64_uqshl_ndi_uus (__a[0], __b)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s8 (int8x16_t __a, const int __b)
{
return (int8x16_t) __builtin_aarch64_sqshl_nv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s16 (int16x8_t __a, const int __b)
{
return (int16x8_t) __builtin_aarch64_sqshl_nv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s32 (int32x4_t __a, const int __b)
{
return (int32x4_t) __builtin_aarch64_sqshl_nv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s64 (int64x2_t __a, const int __b)
{
return (int64x2_t) __builtin_aarch64_sqshl_nv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u8 (uint8x16_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nv16qi_uus (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u16 (uint16x8_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nv8hi_uus (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u32 (uint32x4_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nv4si_uus (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u64 (uint64x2_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nv2di_uus (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlb_n_s8 (int8_t __a, const int __b)
{
return (int8_t) __builtin_aarch64_sqshl_nqi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlh_n_s16 (int16_t __a, const int __b)
{
return (int16_t) __builtin_aarch64_sqshl_nhi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshls_n_s32 (int32_t __a, const int __b)
{
return (int32_t) __builtin_aarch64_sqshl_nsi (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshld_n_s64 (int64_t __a, const int __b)
{
return __builtin_aarch64_sqshl_ndi (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlb_n_u8 (uint8_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nqi_uus (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlh_n_u16 (uint16_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nhi_uus (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshls_n_u32 (uint32_t __a, const int __b)
{
return __builtin_aarch64_uqshl_nsi_uus (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshld_n_u64 (uint64_t __a, const int __b)
{
return __builtin_aarch64_uqshl_ndi_uus (__a, __b);
@@ -20338,73 +23864,85 @@ vqshld_n_u64 (uint64_t __a, const int __b)
/* vqshlu */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s8 (int8x8_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_nv8qi_uss (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s16 (int16x4_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_nv4hi_uss (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s32 (int32x2_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_nv2si_uss (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s64 (int64x1_t __a, const int __b)
{
return (uint64x1_t) {__builtin_aarch64_sqshlu_ndi_uss (__a[0], __b)};
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s8 (int8x16_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_nv16qi_uss (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s16 (int16x8_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_nv8hi_uss (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s32 (int32x4_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_nv4si_uss (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s64 (int64x2_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_nv2di_uss (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlub_n_s8 (int8_t __a, const int __b)
{
return (int8_t) __builtin_aarch64_sqshlu_nqi_uss (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshluh_n_s16 (int16_t __a, const int __b)
{
return (int16_t) __builtin_aarch64_sqshlu_nhi_uss (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlus_n_s32 (int32_t __a, const int __b)
{
return (int32_t) __builtin_aarch64_sqshlu_nsi_uss (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshlud_n_s64 (int64_t __a, const int __b)
{
return __builtin_aarch64_sqshlu_ndi_uss (__a, __b);
@@ -20412,73 +23950,85 @@ vqshlud_n_s64 (int64_t __a, const int __b)
/* vqshrn */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_s16 (int16x8_t __a, const int __b)
{
return (int8x8_t) __builtin_aarch64_sqshrn_nv8hi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_s32 (int32x4_t __a, const int __b)
{
return (int16x4_t) __builtin_aarch64_sqshrn_nv4si (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_s64 (int64x2_t __a, const int __b)
{
return (int32x2_t) __builtin_aarch64_sqshrn_nv2di (__a, __b);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_u16 (uint16x8_t __a, const int __b)
{
return __builtin_aarch64_uqshrn_nv8hi_uus ( __a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_u32 (uint32x4_t __a, const int __b)
{
return __builtin_aarch64_uqshrn_nv4si_uus ( __a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_u64 (uint64x2_t __a, const int __b)
{
return __builtin_aarch64_uqshrn_nv2di_uus ( __a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrnh_n_s16 (int16_t __a, const int __b)
{
return (int8_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrns_n_s32 (int32_t __a, const int __b)
{
return (int16_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrnd_n_s64 (int64_t __a, const int __b)
{
return (int32_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrnh_n_u16 (uint16_t __a, const int __b)
{
return __builtin_aarch64_uqshrn_nhi_uus (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrns_n_u32 (uint32_t __a, const int __b)
{
return __builtin_aarch64_uqshrn_nsi_uus (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrnd_n_u64 (uint64_t __a, const int __b)
{
return __builtin_aarch64_uqshrn_ndi_uus (__a, __b);
@@ -20486,37 +24036,43 @@ vqshrnd_n_u64 (uint64_t __a, const int __b)
/* vqshrun */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_n_s16 (int16x8_t __a, const int __b)
{
return (uint8x8_t) __builtin_aarch64_sqshrun_nv8hi (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_n_s32 (int32x4_t __a, const int __b)
{
return (uint16x4_t) __builtin_aarch64_sqshrun_nv4si (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_n_s64 (int64x2_t __a, const int __b)
{
return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrunh_n_s16 (int16_t __a, const int __b)
{
return (int8_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshruns_n_s32 (int32_t __a, const int __b)
{
return (int16_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrund_n_s64 (int64_t __a, const int __b)
{
return (int32_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
@@ -20524,49 +24080,57 @@ vqshrund_n_s64 (int64_t __a, const int __b)
/* vqsub */
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubb_s8 (int8_t __a, int8_t __b)
{
return (int8_t) __builtin_aarch64_sqsubqi (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubh_s16 (int16_t __a, int16_t __b)
{
return (int16_t) __builtin_aarch64_sqsubhi (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubs_s32 (int32_t __a, int32_t __b)
{
return (int32_t) __builtin_aarch64_sqsubsi (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubd_s64 (int64_t __a, int64_t __b)
{
return __builtin_aarch64_sqsubdi (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubb_u8 (uint8_t __a, uint8_t __b)
{
return (uint8_t) __builtin_aarch64_uqsubqi_uuu (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubh_u16 (uint16_t __a, uint16_t __b)
{
return (uint16_t) __builtin_aarch64_uqsubhi_uuu (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubs_u32 (uint32_t __a, uint32_t __b)
{
return (uint32_t) __builtin_aarch64_uqsubsi_uuu (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubd_u64 (uint64_t __a, uint64_t __b)
{
return __builtin_aarch64_uqsubdi_uuu (__a, __b);
@@ -20574,7 +24138,8 @@ vqsubd_u64 (uint64_t __a, uint64_t __b)
/* vqtbl2 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl2_s8 (int8x16x2_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20583,7 +24148,8 @@ vqtbl2_s8 (int8x16x2_t tab, uint8x8_t idx)
return __builtin_aarch64_tbl3v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl2_u8 (uint8x16x2_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20592,7 +24158,8 @@ vqtbl2_u8 (uint8x16x2_t tab, uint8x8_t idx)
return (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl2_p8 (poly8x16x2_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20601,7 +24168,8 @@ vqtbl2_p8 (poly8x16x2_t tab, uint8x8_t idx)
return (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl2q_s8 (int8x16x2_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20610,7 +24178,8 @@ vqtbl2q_s8 (int8x16x2_t tab, uint8x16_t idx)
return __builtin_aarch64_tbl3v16qi (__o, (int8x16_t)idx);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl2q_u8 (uint8x16x2_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20619,7 +24188,8 @@ vqtbl2q_u8 (uint8x16x2_t tab, uint8x16_t idx)
return (uint8x16_t)__builtin_aarch64_tbl3v16qi (__o, (int8x16_t)idx);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl2q_p8 (poly8x16x2_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20630,7 +24200,8 @@ vqtbl2q_p8 (poly8x16x2_t tab, uint8x16_t idx)
/* vqtbl3 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl3_s8 (int8x16x3_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20640,7 +24211,8 @@ vqtbl3_s8 (int8x16x3_t tab, uint8x8_t idx)
return __builtin_aarch64_qtbl3v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl3_u8 (uint8x16x3_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20650,7 +24222,8 @@ vqtbl3_u8 (uint8x16x3_t tab, uint8x8_t idx)
return (uint8x8_t)__builtin_aarch64_qtbl3v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl3_p8 (poly8x16x3_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20660,7 +24233,8 @@ vqtbl3_p8 (poly8x16x3_t tab, uint8x8_t idx)
return (poly8x8_t)__builtin_aarch64_qtbl3v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl3q_s8 (int8x16x3_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20670,7 +24244,8 @@ vqtbl3q_s8 (int8x16x3_t tab, uint8x16_t idx)
return __builtin_aarch64_qtbl3v16qi (__o, (int8x16_t)idx);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl3q_u8 (uint8x16x3_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20680,7 +24255,8 @@ vqtbl3q_u8 (uint8x16x3_t tab, uint8x16_t idx)
return (uint8x16_t)__builtin_aarch64_qtbl3v16qi (__o, (int8x16_t)idx);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl3q_p8 (poly8x16x3_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20692,7 +24268,8 @@ vqtbl3q_p8 (poly8x16x3_t tab, uint8x16_t idx)
/* vqtbl4 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl4_s8 (int8x16x4_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20703,7 +24280,8 @@ vqtbl4_s8 (int8x16x4_t tab, uint8x8_t idx)
return __builtin_aarch64_qtbl4v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl4_u8 (uint8x16x4_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20714,7 +24292,8 @@ vqtbl4_u8 (uint8x16x4_t tab, uint8x8_t idx)
return (uint8x8_t)__builtin_aarch64_qtbl4v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl4_p8 (poly8x16x4_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20725,7 +24304,8 @@ vqtbl4_p8 (poly8x16x4_t tab, uint8x8_t idx)
return (poly8x8_t)__builtin_aarch64_qtbl4v8qi (__o, (int8x8_t)idx);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl4q_s8 (int8x16x4_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20736,7 +24316,8 @@ vqtbl4q_s8 (int8x16x4_t tab, uint8x16_t idx)
return __builtin_aarch64_qtbl4v16qi (__o, (int8x16_t)idx);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl4q_u8 (uint8x16x4_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20747,7 +24328,8 @@ vqtbl4q_u8 (uint8x16x4_t tab, uint8x16_t idx)
return (uint8x16_t)__builtin_aarch64_qtbl4v16qi (__o, (int8x16_t)idx);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl4q_p8 (poly8x16x4_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20760,7 +24342,8 @@ vqtbl4q_p8 (poly8x16x4_t tab, uint8x16_t idx)
/* vqtbx2 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx2_s8 (int8x8_t r, int8x16x2_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20769,7 +24352,8 @@ vqtbx2_s8 (int8x8_t r, int8x16x2_t tab, uint8x8_t idx)
return __builtin_aarch64_tbx4v8qi (r, __o, (int8x8_t)idx);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx2_u8 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20779,7 +24363,8 @@ vqtbx2_u8 (uint8x8_t r, uint8x16x2_t tab, uint8x8_t idx)
(int8x8_t)idx);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx2_p8 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20789,7 +24374,8 @@ vqtbx2_p8 (poly8x8_t r, poly8x16x2_t tab, uint8x8_t idx)
(int8x8_t)idx);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx2q_s8 (int8x16_t r, int8x16x2_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20798,7 +24384,8 @@ vqtbx2q_s8 (int8x16_t r, int8x16x2_t tab, uint8x16_t idx)
return __builtin_aarch64_tbx4v16qi (r, __o, (int8x16_t)idx);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx2q_u8 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20808,7 +24395,8 @@ vqtbx2q_u8 (uint8x16_t r, uint8x16x2_t tab, uint8x16_t idx)
(int8x16_t)idx);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx2q_p8 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_oi __o;
@@ -20819,7 +24407,8 @@ vqtbx2q_p8 (poly8x16_t r, poly8x16x2_t tab, uint8x16_t idx)
}
/* vqtbx3 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx3_s8 (int8x8_t r, int8x16x3_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20829,7 +24418,8 @@ vqtbx3_s8 (int8x8_t r, int8x16x3_t tab, uint8x8_t idx)
return __builtin_aarch64_qtbx3v8qi (r, __o, (int8x8_t)idx);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx3_u8 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20840,7 +24430,8 @@ vqtbx3_u8 (uint8x8_t r, uint8x16x3_t tab, uint8x8_t idx)
(int8x8_t)idx);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx3_p8 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20851,7 +24442,8 @@ vqtbx3_p8 (poly8x8_t r, poly8x16x3_t tab, uint8x8_t idx)
(int8x8_t)idx);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx3q_s8 (int8x16_t r, int8x16x3_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20861,7 +24453,8 @@ vqtbx3q_s8 (int8x16_t r, int8x16x3_t tab, uint8x16_t idx)
return __builtin_aarch64_qtbx3v16qi (r, __o, (int8x16_t)idx);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx3q_u8 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20872,7 +24465,8 @@ vqtbx3q_u8 (uint8x16_t r, uint8x16x3_t tab, uint8x16_t idx)
(int8x16_t)idx);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx3q_p8 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_ci __o;
@@ -20885,7 +24479,8 @@ vqtbx3q_p8 (poly8x16_t r, poly8x16x3_t tab, uint8x16_t idx)
/* vqtbx4 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx4_s8 (int8x8_t r, int8x16x4_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20896,7 +24491,8 @@ vqtbx4_s8 (int8x8_t r, int8x16x4_t tab, uint8x8_t idx)
return __builtin_aarch64_qtbx4v8qi (r, __o, (int8x8_t)idx);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx4_u8 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20908,7 +24504,8 @@ vqtbx4_u8 (uint8x8_t r, uint8x16x4_t tab, uint8x8_t idx)
(int8x8_t)idx);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx4_p8 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20920,7 +24517,8 @@ vqtbx4_p8 (poly8x8_t r, poly8x16x4_t tab, uint8x8_t idx)
(int8x8_t)idx);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx4q_s8 (int8x16_t r, int8x16x4_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20931,7 +24529,8 @@ vqtbx4q_s8 (int8x16_t r, int8x16x4_t tab, uint8x16_t idx)
return __builtin_aarch64_qtbx4v16qi (r, __o, (int8x16_t)idx);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx4q_u8 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20943,7 +24542,8 @@ vqtbx4q_u8 (uint8x16_t r, uint8x16x4_t tab, uint8x16_t idx)
(int8x16_t)idx);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx4q_p8 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx)
{
__builtin_aarch64_simd_xi __o;
@@ -20957,37 +24557,43 @@ vqtbx4q_p8 (poly8x16_t r, poly8x16x4_t tab, uint8x16_t idx)
/* vrbit */
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrbit_p8 (poly8x8_t __a)
{
return (poly8x8_t) __builtin_aarch64_rbitv8qi ((int8x8_t) __a);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrbit_s8 (int8x8_t __a)
{
return __builtin_aarch64_rbitv8qi (__a);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrbit_u8 (uint8x8_t __a)
{
return (uint8x8_t) __builtin_aarch64_rbitv8qi ((int8x8_t) __a);
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrbitq_p8 (poly8x16_t __a)
{
return (poly8x16_t) __builtin_aarch64_rbitv16qi ((int8x16_t)__a);
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrbitq_s8 (int8x16_t __a)
{
return __builtin_aarch64_rbitv16qi (__a);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrbitq_u8 (uint8x16_t __a)
{
return (uint8x16_t) __builtin_aarch64_rbitv16qi ((int8x16_t) __a);
@@ -20995,43 +24601,57 @@ vrbitq_u8 (uint8x16_t __a)
/* vrecpe */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpe_u32 (uint32x2_t __a)
{
return (uint32x2_t) __builtin_aarch64_urecpev2si ((int32x2_t) __a);
}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpeq_u32 (uint32x4_t __a)
{
return (uint32x4_t) __builtin_aarch64_urecpev4si ((int32x4_t) __a);
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpes_f32 (float32_t __a)
{
return __builtin_aarch64_frecpesf (__a);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecped_f64 (float64_t __a)
{
return __builtin_aarch64_frecpedf (__a);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpe_f32 (float32x2_t __a)
{
return __builtin_aarch64_frecpev2sf (__a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpe_f64 (float64x1_t __a)
+{
+ return (float64x1_t) { vrecped_f64 (vget_lane_f64 (__a, 0)) };
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpeq_f32 (float32x4_t __a)
{
return __builtin_aarch64_frecpev4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpeq_f64 (float64x2_t __a)
{
return __builtin_aarch64_frecpev2df (__a);
@@ -21039,31 +24659,44 @@ vrecpeq_f64 (float64x2_t __a)
/* vrecps */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpss_f32 (float32_t __a, float32_t __b)
{
return __builtin_aarch64_frecpssf (__a, __b);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpsd_f64 (float64_t __a, float64_t __b)
{
return __builtin_aarch64_frecpsdf (__a, __b);
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecps_f32 (float32x2_t __a, float32x2_t __b)
{
return __builtin_aarch64_frecpsv2sf (__a, __b);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecps_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t) { vrecpsd_f64 (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0)) };
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
{
return __builtin_aarch64_frecpsv4sf (__a, __b);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpsq_f64 (float64x2_t __a, float64x2_t __b)
{
return __builtin_aarch64_frecpsv2df (__a, __b);
@@ -21071,13 +24704,15 @@ vrecpsq_f64 (float64x2_t __a, float64x2_t __b)
/* vrecpx */
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpxs_f32 (float32_t __a)
{
return __builtin_aarch64_frecpxsf (__a);
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrecpxd_f64 (float64_t __a)
{
return __builtin_aarch64_frecpxdf (__a);
@@ -21086,226 +24721,276 @@ vrecpxd_f64 (float64_t __a)
/* vrev */
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev16_p8 (poly8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev16_s8 (int8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev16_u8 (uint8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev16q_p8 (poly8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev16q_s8 (int8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev16q_u8 (uint8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32_p8 (poly8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32_p16 (poly16x4_t a)
{
return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 });
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32_s8 (int8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32_s16 (int16x4_t a)
{
return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 });
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32_u8 (uint8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32_u16 (uint16x4_t a)
{
return __builtin_shuffle (a, (uint16x4_t) { 1, 0, 3, 2 });
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_p8 (poly8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_p16 (poly16x8_t a)
{
return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_s8 (int8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_s16 (int16x8_t a)
{
return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_u8 (uint8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_u16 (uint16x8_t a)
{
return __builtin_shuffle (a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64_f16 (float16x4_t __a)
+{
+ return __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_f32 (float32x2_t a)
{
return __builtin_shuffle (a, (uint32x2_t) { 1, 0 });
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_p8 (poly8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_p16 (poly16x4_t a)
{
return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 });
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_s8 (int8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_s16 (int16x4_t a)
{
return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 });
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_s32 (int32x2_t a)
{
return __builtin_shuffle (a, (uint32x2_t) { 1, 0 });
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_u8 (uint8x8_t a)
{
return __builtin_shuffle (a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_u16 (uint16x4_t a)
{
return __builtin_shuffle (a, (uint16x4_t) { 3, 2, 1, 0 });
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64_u32 (uint32x2_t a)
{
return __builtin_shuffle (a, (uint32x2_t) { 1, 0 });
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrev64q_f16 (float16x8_t __a)
+{
+ return __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_f32 (float32x4_t a)
{
return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 });
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_p8 (poly8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_p16 (poly16x8_t a)
{
return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_s8 (int8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_s16 (int16x8_t a)
{
return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_s32 (int32x4_t a)
{
return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 });
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_u8 (uint8x16_t a)
{
return __builtin_shuffle (a,
(uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_u16 (uint16x8_t a)
{
return __builtin_shuffle (a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_u32 (uint32x4_t a)
{
return __builtin_shuffle (a, (uint32x4_t) { 1, 0, 3, 2 });
@@ -21313,25 +24998,29 @@ vrev64q_u32 (uint32x4_t a)
/* vrnd */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrnd_f32 (float32x2_t __a)
{
return __builtin_aarch64_btruncv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrnd_f64 (float64x1_t __a)
{
return vset_lane_f64 (__builtin_trunc (vget_lane_f64 (__a, 0)), __a, 0);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndq_f32 (float32x4_t __a)
{
return __builtin_aarch64_btruncv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndq_f64 (float64x2_t __a)
{
return __builtin_aarch64_btruncv2df (__a);
@@ -21339,25 +25028,29 @@ vrndq_f64 (float64x2_t __a)
/* vrnda */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrnda_f32 (float32x2_t __a)
{
return __builtin_aarch64_roundv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrnda_f64 (float64x1_t __a)
{
return vset_lane_f64 (__builtin_round (vget_lane_f64 (__a, 0)), __a, 0);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndaq_f32 (float32x4_t __a)
{
return __builtin_aarch64_roundv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndaq_f64 (float64x2_t __a)
{
return __builtin_aarch64_roundv2df (__a);
@@ -21365,25 +25058,29 @@ vrndaq_f64 (float64x2_t __a)
/* vrndi */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndi_f32 (float32x2_t __a)
{
return __builtin_aarch64_nearbyintv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndi_f64 (float64x1_t __a)
{
return vset_lane_f64 (__builtin_nearbyint (vget_lane_f64 (__a, 0)), __a, 0);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndiq_f32 (float32x4_t __a)
{
return __builtin_aarch64_nearbyintv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndiq_f64 (float64x2_t __a)
{
return __builtin_aarch64_nearbyintv2df (__a);
@@ -21391,25 +25088,29 @@ vrndiq_f64 (float64x2_t __a)
/* vrndm */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndm_f32 (float32x2_t __a)
{
return __builtin_aarch64_floorv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndm_f64 (float64x1_t __a)
{
return vset_lane_f64 (__builtin_floor (vget_lane_f64 (__a, 0)), __a, 0);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndmq_f32 (float32x4_t __a)
{
return __builtin_aarch64_floorv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndmq_f64 (float64x2_t __a)
{
return __builtin_aarch64_floorv2df (__a);
@@ -21417,25 +25118,29 @@ vrndmq_f64 (float64x2_t __a)
/* vrndn */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndn_f32 (float32x2_t __a)
{
return __builtin_aarch64_frintnv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndn_f64 (float64x1_t __a)
{
return (float64x1_t) {__builtin_aarch64_frintndf (__a[0])};
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndnq_f32 (float32x4_t __a)
{
return __builtin_aarch64_frintnv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndnq_f64 (float64x2_t __a)
{
return __builtin_aarch64_frintnv2df (__a);
@@ -21443,25 +25148,29 @@ vrndnq_f64 (float64x2_t __a)
/* vrndp */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndp_f32 (float32x2_t __a)
{
return __builtin_aarch64_ceilv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndp_f64 (float64x1_t __a)
{
return vset_lane_f64 (__builtin_ceil (vget_lane_f64 (__a, 0)), __a, 0);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndpq_f32 (float32x4_t __a)
{
return __builtin_aarch64_ceilv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndpq_f64 (float64x2_t __a)
{
return __builtin_aarch64_ceilv2df (__a);
@@ -21469,25 +25178,29 @@ vrndpq_f64 (float64x2_t __a)
/* vrndx */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndx_f32 (float32x2_t __a)
{
return __builtin_aarch64_rintv2sf (__a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndx_f64 (float64x1_t __a)
{
return vset_lane_f64 (__builtin_rint (vget_lane_f64 (__a, 0)), __a, 0);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndxq_f32 (float32x4_t __a)
{
return __builtin_aarch64_rintv4sf (__a);
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrndxq_f64 (float64x2_t __a)
{
return __builtin_aarch64_rintv2df (__a);
@@ -21495,109 +25208,127 @@ vrndxq_f64 (float64x2_t __a)
/* vrshl */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s8 (int8x8_t __a, int8x8_t __b)
{
return (int8x8_t) __builtin_aarch64_srshlv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s16 (int16x4_t __a, int16x4_t __b)
{
return (int16x4_t) __builtin_aarch64_srshlv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s32 (int32x2_t __a, int32x2_t __b)
{
return (int32x2_t) __builtin_aarch64_srshlv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s64 (int64x1_t __a, int64x1_t __b)
{
return (int64x1_t) {__builtin_aarch64_srshldi (__a[0], __b[0])};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u8 (uint8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_urshlv8qi_uus (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u16 (uint16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_urshlv4hi_uus (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u32 (uint32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_urshlv2si_uus (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u64 (uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) {__builtin_aarch64_urshldi_uus (__a[0], __b[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s8 (int8x16_t __a, int8x16_t __b)
{
return (int8x16_t) __builtin_aarch64_srshlv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s16 (int16x8_t __a, int16x8_t __b)
{
return (int16x8_t) __builtin_aarch64_srshlv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s32 (int32x4_t __a, int32x4_t __b)
{
return (int32x4_t) __builtin_aarch64_srshlv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s64 (int64x2_t __a, int64x2_t __b)
{
return (int64x2_t) __builtin_aarch64_srshlv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_urshlv16qi_uus (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_urshlv8hi_uus (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_urshlv4si_uus (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_urshlv2di_uus (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshld_s64 (int64_t __a, int64_t __b)
{
return __builtin_aarch64_srshldi (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshld_u64 (uint64_t __a, int64_t __b)
{
return __builtin_aarch64_urshldi_uus (__a, __b);
@@ -21605,219 +25336,344 @@ vrshld_u64 (uint64_t __a, int64_t __b)
/* vrshr */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s8 (int8x8_t __a, const int __b)
{
return (int8x8_t) __builtin_aarch64_srshr_nv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s16 (int16x4_t __a, const int __b)
{
return (int16x4_t) __builtin_aarch64_srshr_nv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s32 (int32x2_t __a, const int __b)
{
return (int32x2_t) __builtin_aarch64_srshr_nv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s64 (int64x1_t __a, const int __b)
{
return (int64x1_t) {__builtin_aarch64_srshr_ndi (__a[0], __b)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u8 (uint8x8_t __a, const int __b)
{
return __builtin_aarch64_urshr_nv8qi_uus (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u16 (uint16x4_t __a, const int __b)
{
return __builtin_aarch64_urshr_nv4hi_uus (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u32 (uint32x2_t __a, const int __b)
{
return __builtin_aarch64_urshr_nv2si_uus (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u64 (uint64x1_t __a, const int __b)
{
return (uint64x1_t) {__builtin_aarch64_urshr_ndi_uus (__a[0], __b)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s8 (int8x16_t __a, const int __b)
{
return (int8x16_t) __builtin_aarch64_srshr_nv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s16 (int16x8_t __a, const int __b)
{
return (int16x8_t) __builtin_aarch64_srshr_nv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s32 (int32x4_t __a, const int __b)
{
return (int32x4_t) __builtin_aarch64_srshr_nv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s64 (int64x2_t __a, const int __b)
{
return (int64x2_t) __builtin_aarch64_srshr_nv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u8 (uint8x16_t __a, const int __b)
{
return __builtin_aarch64_urshr_nv16qi_uus (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u16 (uint16x8_t __a, const int __b)
{
return __builtin_aarch64_urshr_nv8hi_uus (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u32 (uint32x4_t __a, const int __b)
{
return __builtin_aarch64_urshr_nv4si_uus (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u64 (uint64x2_t __a, const int __b)
{
return __builtin_aarch64_urshr_nv2di_uus (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrd_n_s64 (int64_t __a, const int __b)
{
return __builtin_aarch64_srshr_ndi (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrd_n_u64 (uint64_t __a, const int __b)
{
return __builtin_aarch64_urshr_ndi_uus (__a, __b);
}
+/* vrsqrte. */
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtes_f32 (float32_t __a)
+{
+ return __builtin_aarch64_rsqrtesf (__a);
+}
+
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrted_f64 (float64_t __a)
+{
+ return __builtin_aarch64_rsqrtedf (__a);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrte_f32 (float32x2_t __a)
+{
+ return __builtin_aarch64_rsqrtev2sf (__a);
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrte_f64 (float64x1_t __a)
+{
+ return (float64x1_t) {vrsqrted_f64 (vget_lane_f64 (__a, 0))};
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrteq_f32 (float32x4_t __a)
+{
+ return __builtin_aarch64_rsqrtev4sf (__a);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrteq_f64 (float64x2_t __a)
+{
+ return __builtin_aarch64_rsqrtev2df (__a);
+}
+
+/* vrsqrts. */
+
+__extension__ extern __inline float32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtss_f32 (float32_t __a, float32_t __b)
+{
+ return __builtin_aarch64_rsqrtssf (__a, __b);
+}
+
+__extension__ extern __inline float64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtsd_f64 (float64_t __a, float64_t __b)
+{
+ return __builtin_aarch64_rsqrtsdf (__a, __b);
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrts_f32 (float32x2_t __a, float32x2_t __b)
+{
+ return __builtin_aarch64_rsqrtsv2sf (__a, __b);
+}
+
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrts_f64 (float64x1_t __a, float64x1_t __b)
+{
+ return (float64x1_t) {vrsqrtsd_f64 (vget_lane_f64 (__a, 0),
+ vget_lane_f64 (__b, 0))};
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __builtin_aarch64_rsqrtsv4sf (__a, __b);
+}
+
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtsq_f64 (float64x2_t __a, float64x2_t __b)
+{
+ return __builtin_aarch64_rsqrtsv2df (__a, __b);
+}
+
/* vrsra */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t) __builtin_aarch64_srsra_nv8qi (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t) __builtin_aarch64_srsra_nv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t) __builtin_aarch64_srsra_nv2si (__a, __b, __c);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t) {__builtin_aarch64_srsra_ndi (__a[0], __b[0], __c)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
{
return __builtin_aarch64_ursra_nv8qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
return __builtin_aarch64_ursra_nv4hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
return __builtin_aarch64_ursra_nv2si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t) {__builtin_aarch64_ursra_ndi_uuus (__a[0], __b[0], __c)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t) __builtin_aarch64_srsra_nv16qi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t) __builtin_aarch64_srsra_nv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t) __builtin_aarch64_srsra_nv4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t) __builtin_aarch64_srsra_nv2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
{
return __builtin_aarch64_ursra_nv16qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
{
return __builtin_aarch64_ursra_nv8hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
{
return __builtin_aarch64_ursra_nv4si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
{
return __builtin_aarch64_ursra_nv2di_uuus (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsrad_n_s64 (int64_t __a, int64_t __b, const int __c)
{
return __builtin_aarch64_srsra_ndi (__a, __b, __c);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c)
{
return __builtin_aarch64_ursra_ndi_uuus (__a, __b, __c);
@@ -21828,74 +25684,86 @@ vrsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c)
/* vsha1 */
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha1cq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
{
return __builtin_aarch64_crypto_sha1cv4si_uuuu (hash_abcd, hash_e, wk);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha1mq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
{
return __builtin_aarch64_crypto_sha1mv4si_uuuu (hash_abcd, hash_e, wk);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha1pq_u32 (uint32x4_t hash_abcd, uint32_t hash_e, uint32x4_t wk)
{
return __builtin_aarch64_crypto_sha1pv4si_uuuu (hash_abcd, hash_e, wk);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha1h_u32 (uint32_t hash_e)
{
return __builtin_aarch64_crypto_sha1hsi_uu (hash_e);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha1su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7, uint32x4_t w8_11)
{
return __builtin_aarch64_crypto_sha1su0v4si_uuuu (w0_3, w4_7, w8_11);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha1su1q_u32 (uint32x4_t tw0_3, uint32x4_t w12_15)
{
return __builtin_aarch64_crypto_sha1su1v4si_uuu (tw0_3, w12_15);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha256hq_u32 (uint32x4_t hash_abcd, uint32x4_t hash_efgh, uint32x4_t wk)
{
return __builtin_aarch64_crypto_sha256hv4si_uuuu (hash_abcd, hash_efgh, wk);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha256h2q_u32 (uint32x4_t hash_efgh, uint32x4_t hash_abcd, uint32x4_t wk)
{
return __builtin_aarch64_crypto_sha256h2v4si_uuuu (hash_efgh, hash_abcd, wk);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha256su0q_u32 (uint32x4_t w0_3, uint32x4_t w4_7)
{
return __builtin_aarch64_crypto_sha256su0v4si_uuu (w0_3, w4_7);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsha256su1q_u32 (uint32x4_t tw0_3, uint32x4_t w8_11, uint32x4_t w12_15)
{
return __builtin_aarch64_crypto_sha256su1v4si_uuuu (tw0_3, w8_11, w12_15);
}
-__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_p64 (poly64_t a, poly64_t b)
{
return
__builtin_aarch64_crypto_pmulldi_ppp (a, b);
}
-__extension__ static __inline poly128_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly128_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_p64 (poly64x2_t a, poly64x2_t b)
{
return __builtin_aarch64_crypto_pmullv2di_ppp (a, b);
@@ -21905,289 +25773,337 @@ vmull_high_p64 (poly64x2_t a, poly64x2_t b)
/* vshl */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s8 (int8x8_t __a, const int __b)
{
return (int8x8_t) __builtin_aarch64_ashlv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s16 (int16x4_t __a, const int __b)
{
return (int16x4_t) __builtin_aarch64_ashlv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s32 (int32x2_t __a, const int __b)
{
return (int32x2_t) __builtin_aarch64_ashlv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s64 (int64x1_t __a, const int __b)
{
return (int64x1_t) {__builtin_aarch64_ashldi (__a[0], __b)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u8 (uint8x8_t __a, const int __b)
{
return (uint8x8_t) __builtin_aarch64_ashlv8qi ((int8x8_t) __a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u16 (uint16x4_t __a, const int __b)
{
return (uint16x4_t) __builtin_aarch64_ashlv4hi ((int16x4_t) __a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u32 (uint32x2_t __a, const int __b)
{
return (uint32x2_t) __builtin_aarch64_ashlv2si ((int32x2_t) __a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u64 (uint64x1_t __a, const int __b)
{
return (uint64x1_t) {__builtin_aarch64_ashldi ((int64_t) __a[0], __b)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s8 (int8x16_t __a, const int __b)
{
return (int8x16_t) __builtin_aarch64_ashlv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s16 (int16x8_t __a, const int __b)
{
return (int16x8_t) __builtin_aarch64_ashlv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s32 (int32x4_t __a, const int __b)
{
return (int32x4_t) __builtin_aarch64_ashlv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s64 (int64x2_t __a, const int __b)
{
return (int64x2_t) __builtin_aarch64_ashlv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u8 (uint8x16_t __a, const int __b)
{
return (uint8x16_t) __builtin_aarch64_ashlv16qi ((int8x16_t) __a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u16 (uint16x8_t __a, const int __b)
{
return (uint16x8_t) __builtin_aarch64_ashlv8hi ((int16x8_t) __a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u32 (uint32x4_t __a, const int __b)
{
return (uint32x4_t) __builtin_aarch64_ashlv4si ((int32x4_t) __a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u64 (uint64x2_t __a, const int __b)
{
return (uint64x2_t) __builtin_aarch64_ashlv2di ((int64x2_t) __a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshld_n_s64 (int64_t __a, const int __b)
{
return __builtin_aarch64_ashldi (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshld_n_u64 (uint64_t __a, const int __b)
{
return (uint64_t) __builtin_aarch64_ashldi (__a, __b);
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_s8 (int8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_sshlv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_s16 (int16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_sshlv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_s32 (int32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_sshlv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_s64 (int64x1_t __a, int64x1_t __b)
{
return (int64x1_t) {__builtin_aarch64_sshldi (__a[0], __b[0])};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_u8 (uint8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_ushlv8qi_uus (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_u16 (uint16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_ushlv4hi_uus (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_u32 (uint32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_ushlv2si_uus (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshl_u64 (uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) {__builtin_aarch64_ushldi_uus (__a[0], __b[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s8 (int8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_sshlv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s16 (int16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_sshlv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s32 (int32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_sshlv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s64 (int64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_sshlv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u8 (uint8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_ushlv16qi_uus (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u16 (uint16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_ushlv8hi_uus (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u32 (uint32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_ushlv4si_uus (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u64 (uint64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_ushlv2di_uus (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshld_s64 (int64_t __a, int64_t __b)
{
return __builtin_aarch64_sshldi (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshld_u64 (uint64_t __a, uint64_t __b)
{
return __builtin_aarch64_ushldi_uus (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_high_n_s8 (int8x16_t __a, const int __b)
{
return __builtin_aarch64_sshll2_nv16qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_high_n_s16 (int16x8_t __a, const int __b)
{
return __builtin_aarch64_sshll2_nv8hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_high_n_s32 (int32x4_t __a, const int __b)
{
return __builtin_aarch64_sshll2_nv4si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_high_n_u8 (uint8x16_t __a, const int __b)
{
return (uint16x8_t) __builtin_aarch64_ushll2_nv16qi ((int8x16_t) __a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_high_n_u16 (uint16x8_t __a, const int __b)
{
return (uint32x4_t) __builtin_aarch64_ushll2_nv8hi ((int16x8_t) __a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_high_n_u32 (uint32x4_t __a, const int __b)
{
return (uint64x2_t) __builtin_aarch64_ushll2_nv4si ((int32x4_t) __a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_s8 (int8x8_t __a, const int __b)
{
return __builtin_aarch64_sshll_nv8qi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_s16 (int16x4_t __a, const int __b)
{
return __builtin_aarch64_sshll_nv4hi (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_s32 (int32x2_t __a, const int __b)
{
return __builtin_aarch64_sshll_nv2si (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_u8 (uint8x8_t __a, const int __b)
{
return __builtin_aarch64_ushll_nv8qi_uus (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_u16 (uint16x4_t __a, const int __b)
{
return __builtin_aarch64_ushll_nv4hi_uus (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_u32 (uint32x2_t __a, const int __b)
{
return __builtin_aarch64_ushll_nv2si_uus (__a, __b);
@@ -22195,109 +26111,127 @@ vshll_n_u32 (uint32x2_t __a, const int __b)
/* vshr */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s8 (int8x8_t __a, const int __b)
{
return (int8x8_t) __builtin_aarch64_ashrv8qi (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s16 (int16x4_t __a, const int __b)
{
return (int16x4_t) __builtin_aarch64_ashrv4hi (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s32 (int32x2_t __a, const int __b)
{
return (int32x2_t) __builtin_aarch64_ashrv2si (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s64 (int64x1_t __a, const int __b)
{
return (int64x1_t) {__builtin_aarch64_ashr_simddi (__a[0], __b)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u8 (uint8x8_t __a, const int __b)
{
return (uint8x8_t) __builtin_aarch64_lshrv8qi ((int8x8_t) __a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u16 (uint16x4_t __a, const int __b)
{
return (uint16x4_t) __builtin_aarch64_lshrv4hi ((int16x4_t) __a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u32 (uint32x2_t __a, const int __b)
{
return (uint32x2_t) __builtin_aarch64_lshrv2si ((int32x2_t) __a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u64 (uint64x1_t __a, const int __b)
{
return (uint64x1_t) {__builtin_aarch64_lshr_simddi_uus ( __a[0], __b)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s8 (int8x16_t __a, const int __b)
{
return (int8x16_t) __builtin_aarch64_ashrv16qi (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s16 (int16x8_t __a, const int __b)
{
return (int16x8_t) __builtin_aarch64_ashrv8hi (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s32 (int32x4_t __a, const int __b)
{
return (int32x4_t) __builtin_aarch64_ashrv4si (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s64 (int64x2_t __a, const int __b)
{
return (int64x2_t) __builtin_aarch64_ashrv2di (__a, __b);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u8 (uint8x16_t __a, const int __b)
{
return (uint8x16_t) __builtin_aarch64_lshrv16qi ((int8x16_t) __a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u16 (uint16x8_t __a, const int __b)
{
return (uint16x8_t) __builtin_aarch64_lshrv8hi ((int16x8_t) __a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u32 (uint32x4_t __a, const int __b)
{
return (uint32x4_t) __builtin_aarch64_lshrv4si ((int32x4_t) __a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u64 (uint64x2_t __a, const int __b)
{
return (uint64x2_t) __builtin_aarch64_lshrv2di ((int64x2_t) __a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrd_n_s64 (int64_t __a, const int __b)
{
return __builtin_aarch64_ashr_simddi (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrd_n_u64 (uint64_t __a, const int __b)
{
return __builtin_aarch64_lshr_simddi_uus (__a, __b);
@@ -22305,109 +26239,141 @@ vshrd_n_u64 (uint64_t __a, const int __b)
/* vsli */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t) __builtin_aarch64_ssli_nv8qi (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t) __builtin_aarch64_ssli_nv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t) __builtin_aarch64_ssli_nv2si (__a, __b, __c);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t) {__builtin_aarch64_ssli_ndi (__a[0], __b[0], __c)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
{
return __builtin_aarch64_usli_nv8qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
return __builtin_aarch64_usli_nv4hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
return __builtin_aarch64_usli_nv2si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t) {__builtin_aarch64_usli_ndi_uuus (__a[0], __b[0], __c)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
+{
+ return (poly64x1_t) {__builtin_aarch64_ssli_ndi_ppps (__a[0], __b[0], __c)};
+}
+
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t) __builtin_aarch64_ssli_nv16qi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t) __builtin_aarch64_ssli_nv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t) __builtin_aarch64_ssli_nv4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t) __builtin_aarch64_ssli_nv2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
{
return __builtin_aarch64_usli_nv16qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
{
return __builtin_aarch64_usli_nv8hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
{
return __builtin_aarch64_usli_nv4si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
{
return __builtin_aarch64_usli_nv2di_uuus (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
+{
+ return __builtin_aarch64_ssli_nv2di_ppps (__a, __b, __c);
+}
+
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vslid_n_s64 (int64_t __a, int64_t __b, const int __c)
{
return __builtin_aarch64_ssli_ndi (__a, __b, __c);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vslid_n_u64 (uint64_t __a, uint64_t __b, const int __c)
{
return __builtin_aarch64_usli_ndi_uuus (__a, __b, __c);
@@ -22415,98 +26381,114 @@ vslid_n_u64 (uint64_t __a, uint64_t __b, const int __c)
/* vsqadd */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqadd_u8 (uint8x8_t __a, int8x8_t __b)
{
return __builtin_aarch64_usqaddv8qi_uus (__a, __b);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqadd_u16 (uint16x4_t __a, int16x4_t __b)
{
return __builtin_aarch64_usqaddv4hi_uus (__a, __b);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqadd_u32 (uint32x2_t __a, int32x2_t __b)
{
return __builtin_aarch64_usqaddv2si_uus (__a, __b);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqadd_u64 (uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) {__builtin_aarch64_usqadddi_uus (__a[0], __b[0])};
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqaddq_u8 (uint8x16_t __a, int8x16_t __b)
{
return __builtin_aarch64_usqaddv16qi_uus (__a, __b);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqaddq_u16 (uint16x8_t __a, int16x8_t __b)
{
return __builtin_aarch64_usqaddv8hi_uus (__a, __b);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqaddq_u32 (uint32x4_t __a, int32x4_t __b)
{
return __builtin_aarch64_usqaddv4si_uus (__a, __b);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqaddq_u64 (uint64x2_t __a, int64x2_t __b)
{
return __builtin_aarch64_usqaddv2di_uus (__a, __b);
}
-__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqaddb_u8 (uint8_t __a, int8_t __b)
{
return __builtin_aarch64_usqaddqi_uus (__a, __b);
}
-__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqaddh_u16 (uint16_t __a, int16_t __b)
{
return __builtin_aarch64_usqaddhi_uus (__a, __b);
}
-__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqadds_u32 (uint32_t __a, int32_t __b)
{
return __builtin_aarch64_usqaddsi_uus (__a, __b);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqaddd_u64 (uint64_t __a, int64_t __b)
{
return __builtin_aarch64_usqadddi_uus (__a, __b);
}
/* vsqrt */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqrt_f32 (float32x2_t a)
{
return __builtin_aarch64_sqrtv2sf (a);
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqrtq_f32 (float32x4_t a)
{
return __builtin_aarch64_sqrtv4sf (a);
}
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqrt_f64 (float64x1_t a)
{
return (float64x1_t) { __builtin_aarch64_sqrtdf (a[0]) };
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsqrtq_f64 (float64x2_t a)
{
return __builtin_aarch64_sqrtv2df (a);
@@ -22514,109 +26496,127 @@ vsqrtq_f64 (float64x2_t a)
/* vsra */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t) __builtin_aarch64_ssra_nv8qi (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t) __builtin_aarch64_ssra_nv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t) __builtin_aarch64_ssra_nv2si (__a, __b, __c);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t) {__builtin_aarch64_ssra_ndi (__a[0], __b[0], __c)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
{
return __builtin_aarch64_usra_nv8qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
return __builtin_aarch64_usra_nv4hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
return __builtin_aarch64_usra_nv2si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t) {__builtin_aarch64_usra_ndi_uuus (__a[0], __b[0], __c)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t) __builtin_aarch64_ssra_nv16qi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t) __builtin_aarch64_ssra_nv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t) __builtin_aarch64_ssra_nv4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t) __builtin_aarch64_ssra_nv2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
{
return __builtin_aarch64_usra_nv16qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
{
return __builtin_aarch64_usra_nv8hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
{
return __builtin_aarch64_usra_nv4si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
{
return __builtin_aarch64_usra_nv2di_uuus (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsrad_n_s64 (int64_t __a, int64_t __b, const int __c)
{
return __builtin_aarch64_ssra_ndi (__a, __b, __c);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c)
{
return __builtin_aarch64_usra_ndi_uuus (__a, __b, __c);
@@ -22624,109 +26624,127 @@ vsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c)
/* vsri */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t) __builtin_aarch64_ssri_nv8qi (__a, __b, __c);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t) __builtin_aarch64_ssri_nv4hi (__a, __b, __c);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t) __builtin_aarch64_ssri_nv2si (__a, __b, __c);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t) {__builtin_aarch64_ssri_ndi (__a[0], __b[0], __c)};
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
{
return __builtin_aarch64_usri_nv8qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
return __builtin_aarch64_usri_nv4hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
return __builtin_aarch64_usri_nv2si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t) {__builtin_aarch64_usri_ndi_uuus (__a[0], __b[0], __c)};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t) __builtin_aarch64_ssri_nv16qi (__a, __b, __c);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t) __builtin_aarch64_ssri_nv8hi (__a, __b, __c);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t) __builtin_aarch64_ssri_nv4si (__a, __b, __c);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t) __builtin_aarch64_ssri_nv2di (__a, __b, __c);
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
{
return __builtin_aarch64_usri_nv16qi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
{
return __builtin_aarch64_usri_nv8hi_uuus (__a, __b, __c);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
{
return __builtin_aarch64_usri_nv4si_uuus (__a, __b, __c);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
{
return __builtin_aarch64_usri_nv2di_uuus (__a, __b, __c);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsrid_n_s64 (int64_t __a, int64_t __b, const int __c)
{
return __builtin_aarch64_ssri_ndi (__a, __b, __c);
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsrid_n_u64 (uint64_t __a, uint64_t __b, const int __c)
{
return __builtin_aarch64_usri_ndi_uuus (__a, __b, __c);
@@ -22734,84 +26752,104 @@ vsrid_n_u64 (uint64_t __a, uint64_t __b, const int __c)
/* vst1 */
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_f16 (float16_t *__a, float16x4_t __b)
{
__builtin_aarch64_st1v4hf (__a, __b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_f32 (float32_t *a, float32x2_t b)
{
__builtin_aarch64_st1v2sf ((__builtin_aarch64_simd_sf *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_f64 (float64_t *a, float64x1_t b)
{
*a = b[0];
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_p8 (poly8_t *a, poly8x8_t b)
{
__builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a,
(int8x8_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_p16 (poly16_t *a, poly16x4_t b)
{
__builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a,
(int16x4_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_p64 (poly64_t *a, poly64x1_t b)
+{
+ *a = b[0];
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_s8 (int8_t *a, int8x8_t b)
{
__builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_s16 (int16_t *a, int16x4_t b)
{
__builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_s32 (int32_t *a, int32x2_t b)
{
__builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_s64 (int64_t *a, int64x1_t b)
{
*a = b[0];
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_u8 (uint8_t *a, uint8x8_t b)
{
__builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) a,
(int8x8_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_u16 (uint16_t *a, uint16x4_t b)
{
__builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) a,
(int16x4_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_u32 (uint32_t *a, uint32x2_t b)
{
__builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) a,
(int32x2_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_u64 (uint64_t *a, uint64x1_t b)
{
*a = b[0];
@@ -22819,84 +26857,105 @@ vst1_u64 (uint64_t *a, uint64x1_t b)
/* vst1q */
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_f16 (float16_t *__a, float16x8_t __b)
{
__builtin_aarch64_st1v8hf (__a, __b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_f32 (float32_t *a, float32x4_t b)
{
__builtin_aarch64_st1v4sf ((__builtin_aarch64_simd_sf *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_f64 (float64_t *a, float64x2_t b)
{
__builtin_aarch64_st1v2df ((__builtin_aarch64_simd_df *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p8 (poly8_t *a, poly8x16_t b)
{
__builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a,
(int8x16_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p16 (poly16_t *a, poly16x8_t b)
{
__builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a,
(int16x8_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_p64 (poly64_t *a, poly64x2_t b)
+{
+ __builtin_aarch64_st1v2di_sp ((__builtin_aarch64_simd_di *) a,
+ (poly64x2_t) b);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s8 (int8_t *a, int8x16_t b)
{
__builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s16 (int16_t *a, int16x8_t b)
{
__builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s32 (int32_t *a, int32x4_t b)
{
__builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s64 (int64_t *a, int64x2_t b)
{
__builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) a, b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u8 (uint8_t *a, uint8x16_t b)
{
__builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) a,
(int8x16_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u16 (uint16_t *a, uint16x8_t b)
{
__builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) a,
(int16x8_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u32 (uint32_t *a, uint32x4_t b)
{
__builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) a,
(int32x4_t) b);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u64 (uint64_t *a, uint64x2_t b)
{
__builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) a,
@@ -22905,79 +26964,99 @@ vst1q_u64 (uint64_t *a, uint64x2_t b)
/* vst1_lane */
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_f16 (float16_t *__a, float16x4_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_f32 (float32_t *__a, float32x2_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_f64 (float64_t *__a, float64x1_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_p8 (poly8_t *__a, poly8x8_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_p16 (poly16_t *__a, poly16x4_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1_lane_p64 (poly64_t *__a, poly64x1_t __b, const int __lane)
+{
+ *__a = __aarch64_vget_lane_any (__b, __lane);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s8 (int8_t *__a, int8x8_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s16 (int16_t *__a, int16x4_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s32 (int32_t *__a, int32x2_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s64 (int64_t *__a, int64x1_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u8 (uint8_t *__a, uint8x8_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u16 (uint16_t *__a, uint16x4_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u32 (uint32_t *__a, uint32x2_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u64 (uint64_t *__a, uint64x1_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
@@ -22985,79 +27064,99 @@ vst1_lane_u64 (uint64_t *__a, uint64x1_t __b, const int __lane)
/* vst1q_lane */
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_f16 (float16_t *__a, float16x8_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_f32 (float32_t *__a, float32x4_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_f64 (float64_t *__a, float64x2_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_p8 (poly8_t *__a, poly8x16_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_p16 (poly16_t *__a, poly16x8_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_lane_p64 (poly64_t *__a, poly64x2_t __b, const int __lane)
+{
+ *__a = __aarch64_vget_lane_any (__b, __lane);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s8 (int8_t *__a, int8x16_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s16 (int16_t *__a, int16x8_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s32 (int32_t *__a, int32x4_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s64 (int64_t *__a, int64x2_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u8 (uint8_t *__a, uint8x16_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u16 (uint16_t *__a, uint16x8_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u32 (uint32_t *__a, uint32x4_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u64 (uint64_t *__a, uint64x2_t __b, const int __lane)
{
*__a = __aarch64_vget_lane_any (__b, __lane);
@@ -23065,7 +27164,8 @@ vst1q_lane_u64 (uint64_t *__a, uint64x2_t __b, const int __lane)
/* vstn */
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_s64 (int64_t * __a, int64x1x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23077,7 +27177,8 @@ vst2_s64 (int64_t * __a, int64x1x2_t val)
__builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_u64 (uint64_t * __a, uint64x1x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23089,7 +27190,8 @@ vst2_u64 (uint64_t * __a, uint64x1x2_t val)
__builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_f64 (float64_t * __a, float64x1x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23101,7 +27203,8 @@ vst2_f64 (float64_t * __a, float64x1x2_t val)
__builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_s8 (int8_t * __a, int8x8x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23113,7 +27216,8 @@ vst2_s8 (int8_t * __a, int8x8x2_t val)
__builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_p8 (poly8_t * __a, poly8x8x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23125,7 +27229,8 @@ vst2_p8 (poly8_t * __a, poly8x8x2_t val)
__builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_s16 (int16_t * __a, int16x4x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23137,7 +27242,8 @@ vst2_s16 (int16_t * __a, int16x4x2_t val)
__builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_p16 (poly16_t * __a, poly16x4x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23149,7 +27255,8 @@ vst2_p16 (poly16_t * __a, poly16x4x2_t val)
__builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_s32 (int32_t * __a, int32x2x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23161,7 +27268,8 @@ vst2_s32 (int32_t * __a, int32x2x2_t val)
__builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_u8 (uint8_t * __a, uint8x8x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23173,7 +27281,8 @@ vst2_u8 (uint8_t * __a, uint8x8x2_t val)
__builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_u16 (uint16_t * __a, uint16x4x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23185,7 +27294,8 @@ vst2_u16 (uint16_t * __a, uint16x4x2_t val)
__builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_u32 (uint32_t * __a, uint32x2x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23197,7 +27307,8 @@ vst2_u32 (uint32_t * __a, uint32x2x2_t val)
__builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_f16 (float16_t * __a, float16x4x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23209,7 +27320,8 @@ vst2_f16 (float16_t * __a, float16x4x2_t val)
__builtin_aarch64_st2v4hf (__a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_f32 (float32_t * __a, float32x2x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23221,7 +27333,23 @@ vst2_f32 (float32_t * __a, float32x2x2_t val)
__builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2_p64 (poly64_t * __a, poly64x1x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ poly64x2x2_t temp;
+ temp.val[0] = vcombine_p64 (val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p64 (val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+ (poly64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+ (poly64x2_t) temp.val[1], 1);
+ __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_s8 (int8_t * __a, int8x16x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23230,7 +27358,8 @@ vst2q_s8 (int8_t * __a, int8x16x2_t val)
__builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_p8 (poly8_t * __a, poly8x16x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23239,7 +27368,8 @@ vst2q_p8 (poly8_t * __a, poly8x16x2_t val)
__builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_s16 (int16_t * __a, int16x8x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23248,7 +27378,8 @@ vst2q_s16 (int16_t * __a, int16x8x2_t val)
__builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_p16 (poly16_t * __a, poly16x8x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23257,7 +27388,8 @@ vst2q_p16 (poly16_t * __a, poly16x8x2_t val)
__builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_s32 (int32_t * __a, int32x4x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23266,7 +27398,8 @@ vst2q_s32 (int32_t * __a, int32x4x2_t val)
__builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_s64 (int64_t * __a, int64x2x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23275,7 +27408,8 @@ vst2q_s64 (int64_t * __a, int64x2x2_t val)
__builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_u8 (uint8_t * __a, uint8x16x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23284,7 +27418,8 @@ vst2q_u8 (uint8_t * __a, uint8x16x2_t val)
__builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_u16 (uint16_t * __a, uint16x8x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23293,7 +27428,8 @@ vst2q_u16 (uint16_t * __a, uint16x8x2_t val)
__builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_u32 (uint32_t * __a, uint32x4x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23302,7 +27438,8 @@ vst2q_u32 (uint32_t * __a, uint32x4x2_t val)
__builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_u64 (uint64_t * __a, uint64x2x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23311,7 +27448,8 @@ vst2q_u64 (uint64_t * __a, uint64x2x2_t val)
__builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_f16 (float16_t * __a, float16x8x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23320,7 +27458,8 @@ vst2q_f16 (float16_t * __a, float16x8x2_t val)
__builtin_aarch64_st2v8hf (__a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_f32 (float32_t * __a, float32x4x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23329,7 +27468,8 @@ vst2q_f32 (float32_t * __a, float32x4x2_t val)
__builtin_aarch64_st2v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_f64 (float64_t * __a, float64x2x2_t val)
{
__builtin_aarch64_simd_oi __o;
@@ -23338,7 +27478,20 @@ vst2q_f64 (float64_t * __a, float64x2x2_t val)
__builtin_aarch64_st2v2df ((__builtin_aarch64_simd_df *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst2q_p64 (poly64_t * __a, poly64x2x2_t val)
+{
+ __builtin_aarch64_simd_oi __o;
+ __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+ (poly64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
+ (poly64x2_t) val.val[1], 1);
+ __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_s64 (int64_t * __a, int64x1x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23352,7 +27505,8 @@ vst3_s64 (int64_t * __a, int64x1x3_t val)
__builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_u64 (uint64_t * __a, uint64x1x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23366,7 +27520,8 @@ vst3_u64 (uint64_t * __a, uint64x1x3_t val)
__builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_f64 (float64_t * __a, float64x1x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23380,7 +27535,8 @@ vst3_f64 (float64_t * __a, float64x1x3_t val)
__builtin_aarch64_st3df ((__builtin_aarch64_simd_df *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_s8 (int8_t * __a, int8x8x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23394,7 +27550,8 @@ vst3_s8 (int8_t * __a, int8x8x3_t val)
__builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_p8 (poly8_t * __a, poly8x8x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23408,7 +27565,8 @@ vst3_p8 (poly8_t * __a, poly8x8x3_t val)
__builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_s16 (int16_t * __a, int16x4x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23422,7 +27580,8 @@ vst3_s16 (int16_t * __a, int16x4x3_t val)
__builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_p16 (poly16_t * __a, poly16x4x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23436,7 +27595,8 @@ vst3_p16 (poly16_t * __a, poly16x4x3_t val)
__builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_s32 (int32_t * __a, int32x2x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23450,7 +27610,8 @@ vst3_s32 (int32_t * __a, int32x2x3_t val)
__builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_u8 (uint8_t * __a, uint8x8x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23464,7 +27625,8 @@ vst3_u8 (uint8_t * __a, uint8x8x3_t val)
__builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_u16 (uint16_t * __a, uint16x4x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23478,7 +27640,8 @@ vst3_u16 (uint16_t * __a, uint16x4x3_t val)
__builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_u32 (uint32_t * __a, uint32x2x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23492,7 +27655,8 @@ vst3_u32 (uint32_t * __a, uint32x2x3_t val)
__builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_f16 (float16_t * __a, float16x4x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23506,7 +27670,8 @@ vst3_f16 (float16_t * __a, float16x4x3_t val)
__builtin_aarch64_st3v4hf ((__builtin_aarch64_simd_hf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_f32 (float32_t * __a, float32x2x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23520,7 +27685,26 @@ vst3_f32 (float32_t * __a, float32x2x3_t val)
__builtin_aarch64_st3v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3_p64 (poly64_t * __a, poly64x1x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ poly64x2x3_t temp;
+ temp.val[0] = vcombine_p64 (val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p64 (val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p64 (val.val[2], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+ (poly64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+ (poly64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+ (poly64x2_t) temp.val[2], 2);
+ __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_s8 (int8_t * __a, int8x16x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23530,7 +27714,8 @@ vst3q_s8 (int8_t * __a, int8x16x3_t val)
__builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_p8 (poly8_t * __a, poly8x16x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23540,7 +27725,8 @@ vst3q_p8 (poly8_t * __a, poly8x16x3_t val)
__builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_s16 (int16_t * __a, int16x8x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23550,7 +27736,8 @@ vst3q_s16 (int16_t * __a, int16x8x3_t val)
__builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_p16 (poly16_t * __a, poly16x8x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23560,7 +27747,8 @@ vst3q_p16 (poly16_t * __a, poly16x8x3_t val)
__builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_s32 (int32_t * __a, int32x4x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23570,7 +27758,8 @@ vst3q_s32 (int32_t * __a, int32x4x3_t val)
__builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_s64 (int64_t * __a, int64x2x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23580,7 +27769,8 @@ vst3q_s64 (int64_t * __a, int64x2x3_t val)
__builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_u8 (uint8_t * __a, uint8x16x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23590,7 +27780,8 @@ vst3q_u8 (uint8_t * __a, uint8x16x3_t val)
__builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_u16 (uint16_t * __a, uint16x8x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23600,7 +27791,8 @@ vst3q_u16 (uint16_t * __a, uint16x8x3_t val)
__builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_u32 (uint32_t * __a, uint32x4x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23610,7 +27802,8 @@ vst3q_u32 (uint32_t * __a, uint32x4x3_t val)
__builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_u64 (uint64_t * __a, uint64x2x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23620,7 +27813,8 @@ vst3q_u64 (uint64_t * __a, uint64x2x3_t val)
__builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_f16 (float16_t * __a, float16x8x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23630,7 +27824,8 @@ vst3q_f16 (float16_t * __a, float16x8x3_t val)
__builtin_aarch64_st3v8hf ((__builtin_aarch64_simd_hf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_f32 (float32_t * __a, float32x4x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23640,7 +27835,8 @@ vst3q_f32 (float32_t * __a, float32x4x3_t val)
__builtin_aarch64_st3v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_f64 (float64_t * __a, float64x2x3_t val)
{
__builtin_aarch64_simd_ci __o;
@@ -23650,7 +27846,22 @@ vst3q_f64 (float64_t * __a, float64x2x3_t val)
__builtin_aarch64_st3v2df ((__builtin_aarch64_simd_df *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst3q_p64 (poly64_t * __a, poly64x2x3_t val)
+{
+ __builtin_aarch64_simd_ci __o;
+ __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+ (poly64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+ (poly64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
+ (poly64x2_t) val.val[2], 2);
+ __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_s64 (int64_t * __a, int64x1x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23666,7 +27877,8 @@ vst4_s64 (int64_t * __a, int64x1x4_t val)
__builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_u64 (uint64_t * __a, uint64x1x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23682,7 +27894,8 @@ vst4_u64 (uint64_t * __a, uint64x1x4_t val)
__builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_f64 (float64_t * __a, float64x1x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23698,7 +27911,8 @@ vst4_f64 (float64_t * __a, float64x1x4_t val)
__builtin_aarch64_st4df ((__builtin_aarch64_simd_df *) __a, __o);
}
-__extension__ static __inline void
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_s8 (int8_t * __a, int8x8x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23714,7 +27928,8 @@ vst4_s8 (int8_t * __a, int8x8x4_t val)
__builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_p8 (poly8_t * __a, poly8x8x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23730,7 +27945,8 @@ vst4_p8 (poly8_t * __a, poly8x8x4_t val)
__builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_s16 (int16_t * __a, int16x4x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23746,7 +27962,8 @@ vst4_s16 (int16_t * __a, int16x4x4_t val)
__builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_p16 (poly16_t * __a, poly16x4x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23762,7 +27979,8 @@ vst4_p16 (poly16_t * __a, poly16x4x4_t val)
__builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_s32 (int32_t * __a, int32x2x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23778,7 +27996,8 @@ vst4_s32 (int32_t * __a, int32x2x4_t val)
__builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_u8 (uint8_t * __a, uint8x8x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23794,7 +28013,8 @@ vst4_u8 (uint8_t * __a, uint8x8x4_t val)
__builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_u16 (uint16_t * __a, uint16x4x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23810,7 +28030,8 @@ vst4_u16 (uint16_t * __a, uint16x4x4_t val)
__builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_u32 (uint32_t * __a, uint32x2x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23826,7 +28047,8 @@ vst4_u32 (uint32_t * __a, uint32x2x4_t val)
__builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_f16 (float16_t * __a, float16x4x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23842,7 +28064,8 @@ vst4_f16 (float16_t * __a, float16x4x4_t val)
__builtin_aarch64_st4v4hf ((__builtin_aarch64_simd_hf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_f32 (float32_t * __a, float32x2x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23858,7 +28081,29 @@ vst4_f32 (float32_t * __a, float32x2x4_t val)
__builtin_aarch64_st4v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4_p64 (poly64_t * __a, poly64x1x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ poly64x2x4_t temp;
+ temp.val[0] = vcombine_p64 (val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p64 (val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p64 (val.val[2], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_p64 (val.val[3], vcreate_p64 (__AARCH64_UINT64_C (0)));
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) temp.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) temp.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) temp.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) temp.val[3], 3);
+ __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_s8 (int8_t * __a, int8x16x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23869,7 +28114,8 @@ vst4q_s8 (int8_t * __a, int8x16x4_t val)
__builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_p8 (poly8_t * __a, poly8x16x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23880,7 +28126,8 @@ vst4q_p8 (poly8_t * __a, poly8x16x4_t val)
__builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_s16 (int16_t * __a, int16x8x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23891,7 +28138,8 @@ vst4q_s16 (int16_t * __a, int16x8x4_t val)
__builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_p16 (poly16_t * __a, poly16x8x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23902,7 +28150,8 @@ vst4q_p16 (poly16_t * __a, poly16x8x4_t val)
__builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_s32 (int32_t * __a, int32x4x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23913,7 +28162,8 @@ vst4q_s32 (int32_t * __a, int32x4x4_t val)
__builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_s64 (int64_t * __a, int64x2x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23924,7 +28174,8 @@ vst4q_s64 (int64_t * __a, int64x2x4_t val)
__builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_u8 (uint8_t * __a, uint8x16x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23935,7 +28186,8 @@ vst4q_u8 (uint8_t * __a, uint8x16x4_t val)
__builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_u16 (uint16_t * __a, uint16x8x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23946,7 +28198,8 @@ vst4q_u16 (uint16_t * __a, uint16x8x4_t val)
__builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_u32 (uint32_t * __a, uint32x4x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23957,7 +28210,8 @@ vst4q_u32 (uint32_t * __a, uint32x4x4_t val)
__builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_u64 (uint64_t * __a, uint64x2x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23968,7 +28222,8 @@ vst4q_u64 (uint64_t * __a, uint64x2x4_t val)
__builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_f16 (float16_t * __a, float16x8x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23979,7 +28234,8 @@ vst4q_f16 (float16_t * __a, float16x8x4_t val)
__builtin_aarch64_st4v8hf ((__builtin_aarch64_simd_hf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_f32 (float32_t * __a, float32x4x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -23990,7 +28246,8 @@ vst4q_f32 (float32_t * __a, float32x4x4_t val)
__builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
}
-__extension__ static __inline void __attribute__ ((__always_inline__))
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_f64 (float64_t * __a, float64x2x4_t val)
{
__builtin_aarch64_simd_xi __o;
@@ -24001,15 +28258,33 @@ vst4q_f64 (float64_t * __a, float64x2x4_t val)
__builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o);
}
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst4q_p64 (poly64_t * __a, poly64x2x4_t val)
+{
+ __builtin_aarch64_simd_xi __o;
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) val.val[0], 0);
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) val.val[1], 1);
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) val.val[2], 2);
+ __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
+ (poly64x2_t) val.val[3], 3);
+ __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
+}
+
/* vsub */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubd_s64 (int64_t __a, int64_t __b)
{
return __a - __b;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubd_u64 (uint64_t __a, uint64_t __b)
{
return __a - __b;
@@ -24017,7 +28292,8 @@ vsubd_u64 (uint64_t __a, uint64_t __b)
/* vtbx1 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx1_s8 (int8x8_t __r, int8x8_t __tab, int8x8_t __idx)
{
uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx),
@@ -24027,7 +28303,8 @@ vtbx1_s8 (int8x8_t __r, int8x8_t __tab, int8x8_t __idx)
return vbsl_s8 (__mask, __tbl, __r);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx1_u8 (uint8x8_t __r, uint8x8_t __tab, uint8x8_t __idx)
{
uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8));
@@ -24036,7 +28313,8 @@ vtbx1_u8 (uint8x8_t __r, uint8x8_t __tab, uint8x8_t __idx)
return vbsl_u8 (__mask, __tbl, __r);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx1_p8 (poly8x8_t __r, poly8x8_t __tab, uint8x8_t __idx)
{
uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8));
@@ -24047,7 +28325,8 @@ vtbx1_p8 (poly8x8_t __r, poly8x8_t __tab, uint8x8_t __idx)
/* vtbx3 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx3_s8 (int8x8_t __r, int8x8x3_t __tab, int8x8_t __idx)
{
uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx),
@@ -24057,7 +28336,8 @@ vtbx3_s8 (int8x8_t __r, int8x8x3_t __tab, int8x8_t __idx)
return vbsl_s8 (__mask, __tbl, __r);
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx3_u8 (uint8x8_t __r, uint8x8x3_t __tab, uint8x8_t __idx)
{
uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24));
@@ -24066,7 +28346,8 @@ vtbx3_u8 (uint8x8_t __r, uint8x8x3_t __tab, uint8x8_t __idx)
return vbsl_u8 (__mask, __tbl, __r);
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx3_p8 (poly8x8_t __r, poly8x8x3_t __tab, uint8x8_t __idx)
{
uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24));
@@ -24077,7 +28358,8 @@ vtbx3_p8 (poly8x8_t __r, poly8x8x3_t __tab, uint8x8_t __idx)
/* vtbx4 */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_s8 (int8x8_t __r, int8x8x4_t __tab, int8x8_t __idx)
{
int8x8_t result;
@@ -24093,7 +28375,8 @@ vtbx4_s8 (int8x8_t __r, int8x8x4_t __tab, int8x8_t __idx)
return result;
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_u8 (uint8x8_t __r, uint8x8x4_t __tab, uint8x8_t __idx)
{
uint8x8_t result;
@@ -24110,7 +28393,8 @@ vtbx4_u8 (uint8x8_t __r, uint8x8x4_t __tab, uint8x8_t __idx)
return result;
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_p8 (poly8x8_t __r, poly8x8x4_t __tab, uint8x8_t __idx)
{
poly8x8_t result;
@@ -24129,7 +28413,19 @@ vtbx4_p8 (poly8x8_t __r, poly8x8x4_t __tab, uint8x8_t __idx)
/* vtrn */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn1_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 1, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6});
+#endif
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24139,7 +28435,8 @@ vtrn1_f32 (float32x2_t __a, float32x2_t __b)
#endif
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24149,7 +28446,8 @@ vtrn1_p8 (poly8x8_t __a, poly8x8_t __b)
#endif
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24159,7 +28457,8 @@ vtrn1_p16 (poly16x4_t __a, poly16x4_t __b)
#endif
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24169,7 +28468,8 @@ vtrn1_s8 (int8x8_t __a, int8x8_t __b)
#endif
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24179,7 +28479,8 @@ vtrn1_s16 (int16x4_t __a, int16x4_t __b)
#endif
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24189,7 +28490,8 @@ vtrn1_s32 (int32x2_t __a, int32x2_t __b)
#endif
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24199,7 +28501,8 @@ vtrn1_u8 (uint8x8_t __a, uint8x8_t __b)
#endif
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24209,7 +28512,8 @@ vtrn1_u16 (uint16x4_t __a, uint16x4_t __b)
#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24219,7 +28523,19 @@ vtrn1_u32 (uint32x2_t __a, uint32x2_t __b)
#endif
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn1q_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
+#endif
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24229,7 +28545,8 @@ vtrn1q_f32 (float32x4_t __a, float32x4_t __b)
#endif
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24239,7 +28556,8 @@ vtrn1q_f64 (float64x2_t __a, float64x2_t __b)
#endif
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -24251,7 +28569,8 @@ vtrn1q_p8 (poly8x16_t __a, poly8x16_t __b)
#endif
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24261,7 +28580,8 @@ vtrn1q_p16 (poly16x8_t __a, poly16x8_t __b)
#endif
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -24273,7 +28593,8 @@ vtrn1q_s8 (int8x16_t __a, int8x16_t __b)
#endif
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24283,7 +28604,8 @@ vtrn1q_s16 (int16x8_t __a, int16x8_t __b)
#endif
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24293,7 +28615,8 @@ vtrn1q_s32 (int32x4_t __a, int32x4_t __b)
#endif
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24303,7 +28626,8 @@ vtrn1q_s64 (int64x2_t __a, int64x2_t __b)
#endif
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -24315,7 +28639,8 @@ vtrn1q_u8 (uint8x16_t __a, uint8x16_t __b)
#endif
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24325,7 +28650,8 @@ vtrn1q_u16 (uint16x8_t __a, uint16x8_t __b)
#endif
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24335,7 +28661,8 @@ vtrn1q_u32 (uint32x4_t __a, uint32x4_t __b)
#endif
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn1q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24345,7 +28672,19 @@ vtrn1q_u64 (uint64x2_t __a, uint64x2_t __b)
#endif
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn2_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 6, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7});
+#endif
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24355,7 +28694,8 @@ vtrn2_f32 (float32x2_t __a, float32x2_t __b)
#endif
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24365,7 +28705,8 @@ vtrn2_p8 (poly8x8_t __a, poly8x8_t __b)
#endif
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24375,7 +28716,8 @@ vtrn2_p16 (poly16x4_t __a, poly16x4_t __b)
#endif
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24385,7 +28727,8 @@ vtrn2_s8 (int8x8_t __a, int8x8_t __b)
#endif
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24395,7 +28738,8 @@ vtrn2_s16 (int16x4_t __a, int16x4_t __b)
#endif
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24405,7 +28749,8 @@ vtrn2_s32 (int32x2_t __a, int32x2_t __b)
#endif
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24415,7 +28760,8 @@ vtrn2_u8 (uint8x8_t __a, uint8x8_t __b)
#endif
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24425,7 +28771,8 @@ vtrn2_u16 (uint16x4_t __a, uint16x4_t __b)
#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24435,7 +28782,19 @@ vtrn2_u32 (uint32x2_t __a, uint32x2_t __b)
#endif
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn2q_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
+#endif
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24445,7 +28804,8 @@ vtrn2q_f32 (float32x4_t __a, float32x4_t __b)
#endif
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24455,7 +28815,8 @@ vtrn2q_f64 (float64x2_t __a, float64x2_t __b)
#endif
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -24467,7 +28828,8 @@ vtrn2q_p8 (poly8x16_t __a, poly8x16_t __b)
#endif
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24477,7 +28839,8 @@ vtrn2q_p16 (poly16x8_t __a, poly16x8_t __b)
#endif
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -24489,7 +28852,8 @@ vtrn2q_s8 (int8x16_t __a, int8x16_t __b)
#endif
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24499,7 +28863,8 @@ vtrn2q_s16 (int16x8_t __a, int16x8_t __b)
#endif
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24509,7 +28874,8 @@ vtrn2q_s32 (int32x4_t __a, int32x4_t __b)
#endif
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24519,7 +28885,8 @@ vtrn2q_s64 (int64x2_t __a, int64x2_t __b)
#endif
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -24531,7 +28898,8 @@ vtrn2q_u8 (uint8x16_t __a, uint8x16_t __b)
#endif
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24541,7 +28909,8 @@ vtrn2q_u16 (uint16x8_t __a, uint16x8_t __b)
#endif
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24551,7 +28920,8 @@ vtrn2q_u32 (uint32x4_t __a, uint32x4_t __b)
#endif
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn2q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24561,109 +28931,141 @@ vtrn2q_u64 (uint64x2_t __a, uint64x2_t __b)
#endif
}
-__extension__ static __inline float32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrn_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return (float16x4x2_t) {vtrn1_f16 (__a, __b), vtrn2_f16 (__a, __b)};
+}
+
+__extension__ extern __inline float32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_f32 (float32x2_t a, float32x2_t b)
{
return (float32x2x2_t) {vtrn1_f32 (a, b), vtrn2_f32 (a, b)};
}
-__extension__ static __inline poly8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_p8 (poly8x8_t a, poly8x8_t b)
{
return (poly8x8x2_t) {vtrn1_p8 (a, b), vtrn2_p8 (a, b)};
}
-__extension__ static __inline poly16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_p16 (poly16x4_t a, poly16x4_t b)
{
return (poly16x4x2_t) {vtrn1_p16 (a, b), vtrn2_p16 (a, b)};
}
-__extension__ static __inline int8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_s8 (int8x8_t a, int8x8_t b)
{
return (int8x8x2_t) {vtrn1_s8 (a, b), vtrn2_s8 (a, b)};
}
-__extension__ static __inline int16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_s16 (int16x4_t a, int16x4_t b)
{
return (int16x4x2_t) {vtrn1_s16 (a, b), vtrn2_s16 (a, b)};
}
-__extension__ static __inline int32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_s32 (int32x2_t a, int32x2_t b)
{
return (int32x2x2_t) {vtrn1_s32 (a, b), vtrn2_s32 (a, b)};
}
-__extension__ static __inline uint8x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_u8 (uint8x8_t a, uint8x8_t b)
{
return (uint8x8x2_t) {vtrn1_u8 (a, b), vtrn2_u8 (a, b)};
}
-__extension__ static __inline uint16x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_u16 (uint16x4_t a, uint16x4_t b)
{
return (uint16x4x2_t) {vtrn1_u16 (a, b), vtrn2_u16 (a, b)};
}
-__extension__ static __inline uint32x2x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrn_u32 (uint32x2_t a, uint32x2_t b)
{
return (uint32x2x2_t) {vtrn1_u32 (a, b), vtrn2_u32 (a, b)};
}
-__extension__ static __inline float32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vtrnq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return (float16x8x2_t) {vtrn1q_f16 (__a, __b), vtrn2q_f16 (__a, __b)};
+}
+
+__extension__ extern __inline float32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_f32 (float32x4_t a, float32x4_t b)
{
return (float32x4x2_t) {vtrn1q_f32 (a, b), vtrn2q_f32 (a, b)};
}
-__extension__ static __inline poly8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_p8 (poly8x16_t a, poly8x16_t b)
{
return (poly8x16x2_t) {vtrn1q_p8 (a, b), vtrn2q_p8 (a, b)};
}
-__extension__ static __inline poly16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_p16 (poly16x8_t a, poly16x8_t b)
{
return (poly16x8x2_t) {vtrn1q_p16 (a, b), vtrn2q_p16 (a, b)};
}
-__extension__ static __inline int8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_s8 (int8x16_t a, int8x16_t b)
{
return (int8x16x2_t) {vtrn1q_s8 (a, b), vtrn2q_s8 (a, b)};
}
-__extension__ static __inline int16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_s16 (int16x8_t a, int16x8_t b)
{
return (int16x8x2_t) {vtrn1q_s16 (a, b), vtrn2q_s16 (a, b)};
}
-__extension__ static __inline int32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_s32 (int32x4_t a, int32x4_t b)
{
return (int32x4x2_t) {vtrn1q_s32 (a, b), vtrn2q_s32 (a, b)};
}
-__extension__ static __inline uint8x16x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_u8 (uint8x16_t a, uint8x16_t b)
{
return (uint8x16x2_t) {vtrn1q_u8 (a, b), vtrn2q_u8 (a, b)};
}
-__extension__ static __inline uint16x8x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_u16 (uint16x8_t a, uint16x8_t b)
{
return (uint16x8x2_t) {vtrn1q_u16 (a, b), vtrn2q_u16 (a, b)};
}
-__extension__ static __inline uint32x4x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_u32 (uint32x4_t a, uint32x4_t b)
{
return (uint32x4x2_t) {vtrn1q_u32 (a, b), vtrn2q_u32 (a, b)};
@@ -24671,109 +29073,127 @@ vtrnq_u32 (uint32x4_t a, uint32x4_t b)
/* vtst */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_s8 (int8x8_t __a, int8x8_t __b)
{
return (uint8x8_t) ((__a & __b) != 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_s16 (int16x4_t __a, int16x4_t __b)
{
return (uint16x4_t) ((__a & __b) != 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_s32 (int32x2_t __a, int32x2_t __b)
{
return (uint32x2_t) ((__a & __b) != 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_s64 (int64x1_t __a, int64x1_t __b)
{
return (uint64x1_t) ((__a & __b) != __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_u8 (uint8x8_t __a, uint8x8_t __b)
{
return ((__a & __b) != 0);
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_u16 (uint16x4_t __a, uint16x4_t __b)
{
return ((__a & __b) != 0);
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_u32 (uint32x2_t __a, uint32x2_t __b)
{
return ((__a & __b) != 0);
}
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_u64 (uint64x1_t __a, uint64x1_t __b)
{
return ((__a & __b) != __AARCH64_UINT64_C (0));
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_s8 (int8x16_t __a, int8x16_t __b)
{
return (uint8x16_t) ((__a & __b) != 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_s16 (int16x8_t __a, int16x8_t __b)
{
return (uint16x8_t) ((__a & __b) != 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_s32 (int32x4_t __a, int32x4_t __b)
{
return (uint32x4_t) ((__a & __b) != 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_s64 (int64x2_t __a, int64x2_t __b)
{
return (uint64x2_t) ((__a & __b) != __AARCH64_INT64_C (0));
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
{
return ((__a & __b) != 0);
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
{
return ((__a & __b) != 0);
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
{
return ((__a & __b) != 0);
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_u64 (uint64x2_t __a, uint64x2_t __b)
{
return ((__a & __b) != __AARCH64_UINT64_C (0));
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstd_s64 (int64_t __a, int64_t __b)
{
return (__a & __b) ? -1ll : 0ll;
}
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstd_u64 (uint64_t __a, uint64_t __b)
{
return (__a & __b) ? -1ll : 0ll;
@@ -24781,81 +29201,93 @@ vtstd_u64 (uint64_t __a, uint64_t __b)
/* vuqadd */
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqadd_s8 (int8x8_t __a, uint8x8_t __b)
{
return __builtin_aarch64_suqaddv8qi_ssu (__a, __b);
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqadd_s16 (int16x4_t __a, uint16x4_t __b)
{
return __builtin_aarch64_suqaddv4hi_ssu (__a, __b);
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqadd_s32 (int32x2_t __a, uint32x2_t __b)
{
return __builtin_aarch64_suqaddv2si_ssu (__a, __b);
}
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x1_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqadd_s64 (int64x1_t __a, uint64x1_t __b)
{
return (int64x1_t) {__builtin_aarch64_suqadddi_ssu (__a[0], __b[0])};
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqaddq_s8 (int8x16_t __a, uint8x16_t __b)
{
return __builtin_aarch64_suqaddv16qi_ssu (__a, __b);
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqaddq_s16 (int16x8_t __a, uint16x8_t __b)
{
return __builtin_aarch64_suqaddv8hi_ssu (__a, __b);
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqaddq_s32 (int32x4_t __a, uint32x4_t __b)
{
return __builtin_aarch64_suqaddv4si_ssu (__a, __b);
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqaddq_s64 (int64x2_t __a, uint64x2_t __b)
{
return __builtin_aarch64_suqaddv2di_ssu (__a, __b);
}
-__extension__ static __inline int8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqaddb_s8 (int8_t __a, uint8_t __b)
{
return __builtin_aarch64_suqaddqi_ssu (__a, __b);
}
-__extension__ static __inline int16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqaddh_s16 (int16_t __a, uint16_t __b)
{
return __builtin_aarch64_suqaddhi_ssu (__a, __b);
}
-__extension__ static __inline int32_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqadds_s32 (int32_t __a, uint32_t __b)
{
return __builtin_aarch64_suqaddsi_ssu (__a, __b);
}
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuqaddd_s64 (int64_t __a, uint64_t __b)
{
return __builtin_aarch64_suqadddi_ssu (__a, __b);
}
#define __DEFINTERLEAVE(op, rettype, intype, funcsuffix, Q) \
- __extension__ static __inline rettype \
- __attribute__ ((__always_inline__)) \
+ __extension__ extern __inline rettype \
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
v ## op ## Q ## _ ## funcsuffix (intype a, intype b) \
{ \
return (rettype) {v ## op ## 1 ## Q ## _ ## funcsuffix (a, b), \
@@ -24863,6 +29295,7 @@ vuqaddd_s64 (int64_t __a, uint64_t __b)
}
#define __INTERLEAVE_LIST(op) \
+ __DEFINTERLEAVE (op, float16x4x2_t, float16x4_t, f16,) \
__DEFINTERLEAVE (op, float32x2x2_t, float32x2_t, f32,) \
__DEFINTERLEAVE (op, poly8x8x2_t, poly8x8_t, p8,) \
__DEFINTERLEAVE (op, poly16x4x2_t, poly16x4_t, p16,) \
@@ -24872,6 +29305,7 @@ vuqaddd_s64 (int64_t __a, uint64_t __b)
__DEFINTERLEAVE (op, uint8x8x2_t, uint8x8_t, u8,) \
__DEFINTERLEAVE (op, uint16x4x2_t, uint16x4_t, u16,) \
__DEFINTERLEAVE (op, uint32x2x2_t, uint32x2_t, u32,) \
+ __DEFINTERLEAVE (op, float16x8x2_t, float16x8_t, f16, q) \
__DEFINTERLEAVE (op, float32x4x2_t, float32x4_t, f32, q) \
__DEFINTERLEAVE (op, poly8x16x2_t, poly8x16_t, p8, q) \
__DEFINTERLEAVE (op, poly16x8x2_t, poly16x8_t, p16, q) \
@@ -24884,7 +29318,19 @@ vuqaddd_s64 (int64_t __a, uint64_t __b)
/* vuzp */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp1_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
+#endif
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24894,7 +29340,8 @@ vuzp1_f32 (float32x2_t __a, float32x2_t __b)
#endif
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24904,7 +29351,8 @@ vuzp1_p8 (poly8x8_t __a, poly8x8_t __b)
#endif
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24914,7 +29362,8 @@ vuzp1_p16 (poly16x4_t __a, poly16x4_t __b)
#endif
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24924,7 +29373,8 @@ vuzp1_s8 (int8x8_t __a, int8x8_t __b)
#endif
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24934,7 +29384,8 @@ vuzp1_s16 (int16x4_t __a, int16x4_t __b)
#endif
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24944,7 +29395,8 @@ vuzp1_s32 (int32x2_t __a, int32x2_t __b)
#endif
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -24954,7 +29406,8 @@ vuzp1_u8 (uint8x8_t __a, uint8x8_t __b)
#endif
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24964,7 +29417,8 @@ vuzp1_u16 (uint16x4_t __a, uint16x4_t __b)
#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24974,7 +29428,19 @@ vuzp1_u32 (uint32x2_t __a, uint32x2_t __b)
#endif
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp1q_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
+#endif
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -24984,7 +29450,8 @@ vuzp1q_f32 (float32x4_t __a, float32x4_t __b)
#endif
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -24994,7 +29461,8 @@ vuzp1q_f64 (float64x2_t __a, float64x2_t __b)
#endif
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25006,7 +29474,8 @@ vuzp1q_p8 (poly8x16_t __a, poly8x16_t __b)
#endif
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25016,7 +29485,8 @@ vuzp1q_p16 (poly16x8_t __a, poly16x8_t __b)
#endif
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25028,7 +29498,8 @@ vuzp1q_s8 (int8x16_t __a, int8x16_t __b)
#endif
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25038,7 +29509,8 @@ vuzp1q_s16 (int16x8_t __a, int16x8_t __b)
#endif
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25048,7 +29520,8 @@ vuzp1q_s32 (int32x4_t __a, int32x4_t __b)
#endif
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25058,7 +29531,8 @@ vuzp1q_s64 (int64x2_t __a, int64x2_t __b)
#endif
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25070,7 +29544,8 @@ vuzp1q_u8 (uint8x16_t __a, uint8x16_t __b)
#endif
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25080,7 +29555,8 @@ vuzp1q_u16 (uint16x8_t __a, uint16x8_t __b)
#endif
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25090,7 +29566,8 @@ vuzp1q_u32 (uint32x4_t __a, uint32x4_t __b)
#endif
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp1q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25100,7 +29577,19 @@ vuzp1q_u64 (uint64x2_t __a, uint64x2_t __b)
#endif
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp2_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
+#endif
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25110,7 +29599,8 @@ vuzp2_f32 (float32x2_t __a, float32x2_t __b)
#endif
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25120,7 +29610,8 @@ vuzp2_p8 (poly8x8_t __a, poly8x8_t __b)
#endif
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25130,7 +29621,8 @@ vuzp2_p16 (poly16x4_t __a, poly16x4_t __b)
#endif
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25140,7 +29632,8 @@ vuzp2_s8 (int8x8_t __a, int8x8_t __b)
#endif
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25150,7 +29643,8 @@ vuzp2_s16 (int16x4_t __a, int16x4_t __b)
#endif
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25160,7 +29654,8 @@ vuzp2_s32 (int32x2_t __a, int32x2_t __b)
#endif
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25170,7 +29665,8 @@ vuzp2_u8 (uint8x8_t __a, uint8x8_t __b)
#endif
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25180,7 +29676,8 @@ vuzp2_u16 (uint16x4_t __a, uint16x4_t __b)
#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25190,7 +29687,19 @@ vuzp2_u32 (uint32x2_t __a, uint32x2_t __b)
#endif
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vuzp2q_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
+#endif
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25200,7 +29709,8 @@ vuzp2q_f32 (float32x4_t __a, float32x4_t __b)
#endif
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25210,7 +29720,8 @@ vuzp2q_f64 (float64x2_t __a, float64x2_t __b)
#endif
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25222,7 +29733,8 @@ vuzp2q_p8 (poly8x16_t __a, poly8x16_t __b)
#endif
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25232,7 +29744,8 @@ vuzp2q_p16 (poly16x8_t __a, poly16x8_t __b)
#endif
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25244,7 +29757,8 @@ vuzp2q_s8 (int8x16_t __a, int8x16_t __b)
#endif
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25254,7 +29768,8 @@ vuzp2q_s16 (int16x8_t __a, int16x8_t __b)
#endif
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25264,7 +29779,8 @@ vuzp2q_s32 (int32x4_t __a, int32x4_t __b)
#endif
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25274,7 +29790,8 @@ vuzp2q_s64 (int64x2_t __a, int64x2_t __b)
#endif
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25286,7 +29803,8 @@ vuzp2q_u8 (uint8x16_t __a, uint8x16_t __b)
#endif
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25296,7 +29814,8 @@ vuzp2q_u16 (uint16x8_t __a, uint16x8_t __b)
#endif
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25306,7 +29825,8 @@ vuzp2q_u32 (uint32x4_t __a, uint32x4_t __b)
#endif
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vuzp2q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25320,7 +29840,19 @@ __INTERLEAVE_LIST (uzp)
/* vzip */
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip1_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
+#endif
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25330,7 +29862,8 @@ vzip1_f32 (float32x2_t __a, float32x2_t __b)
#endif
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25340,7 +29873,8 @@ vzip1_p8 (poly8x8_t __a, poly8x8_t __b)
#endif
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25350,7 +29884,8 @@ vzip1_p16 (poly16x4_t __a, poly16x4_t __b)
#endif
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25360,7 +29895,8 @@ vzip1_s8 (int8x8_t __a, int8x8_t __b)
#endif
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25370,7 +29906,8 @@ vzip1_s16 (int16x4_t __a, int16x4_t __b)
#endif
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25380,7 +29917,8 @@ vzip1_s32 (int32x2_t __a, int32x2_t __b)
#endif
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25390,7 +29928,8 @@ vzip1_u8 (uint8x8_t __a, uint8x8_t __b)
#endif
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25400,7 +29939,8 @@ vzip1_u16 (uint16x4_t __a, uint16x4_t __b)
#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25410,7 +29950,21 @@ vzip1_u32 (uint32x2_t __a, uint32x2_t __b)
#endif
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip1q_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b,
+ (uint16x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
+#endif
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25420,7 +29974,8 @@ vzip1q_f32 (float32x4_t __a, float32x4_t __b)
#endif
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25430,7 +29985,8 @@ vzip1q_f64 (float64x2_t __a, float64x2_t __b)
#endif
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25442,7 +29998,8 @@ vzip1q_p8 (poly8x16_t __a, poly8x16_t __b)
#endif
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25453,7 +30010,8 @@ vzip1q_p16 (poly16x8_t __a, poly16x8_t __b)
#endif
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25465,7 +30023,8 @@ vzip1q_s8 (int8x16_t __a, int8x16_t __b)
#endif
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25476,7 +30035,8 @@ vzip1q_s16 (int16x8_t __a, int16x8_t __b)
#endif
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25486,7 +30046,8 @@ vzip1q_s32 (int32x4_t __a, int32x4_t __b)
#endif
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25496,7 +30057,8 @@ vzip1q_s64 (int64x2_t __a, int64x2_t __b)
#endif
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25508,7 +30070,8 @@ vzip1q_u8 (uint8x16_t __a, uint8x16_t __b)
#endif
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25519,7 +30082,8 @@ vzip1q_u16 (uint16x8_t __a, uint16x8_t __b)
#endif
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25529,7 +30093,8 @@ vzip1q_u32 (uint32x4_t __a, uint32x4_t __b)
#endif
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip1q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25539,7 +30104,19 @@ vzip1q_u64 (uint64x2_t __a, uint64x2_t __b)
#endif
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip2_f16 (float16x4_t __a, float16x4_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
+#else
+ return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
+#endif
+}
+
+__extension__ extern __inline float32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_f32 (float32x2_t __a, float32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25549,7 +30126,8 @@ vzip2_f32 (float32x2_t __a, float32x2_t __b)
#endif
}
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_p8 (poly8x8_t __a, poly8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25559,7 +30137,8 @@ vzip2_p8 (poly8x8_t __a, poly8x8_t __b)
#endif
}
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_p16 (poly16x4_t __a, poly16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25569,7 +30148,8 @@ vzip2_p16 (poly16x4_t __a, poly16x4_t __b)
#endif
}
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_s8 (int8x8_t __a, int8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25579,7 +30159,8 @@ vzip2_s8 (int8x8_t __a, int8x8_t __b)
#endif
}
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_s16 (int16x4_t __a, int16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25589,7 +30170,8 @@ vzip2_s16 (int16x4_t __a, int16x4_t __b)
#endif
}
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_s32 (int32x2_t __a, int32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25599,7 +30181,8 @@ vzip2_s32 (int32x2_t __a, int32x2_t __b)
#endif
}
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_u8 (uint8x8_t __a, uint8x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25609,7 +30192,8 @@ vzip2_u8 (uint8x8_t __a, uint8x8_t __b)
#endif
}
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_u16 (uint16x4_t __a, uint16x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25619,7 +30203,8 @@ vzip2_u16 (uint16x4_t __a, uint16x4_t __b)
#endif
}
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2_u32 (uint32x2_t __a, uint32x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25629,7 +30214,21 @@ vzip2_u32 (uint32x2_t __a, uint32x2_t __b)
#endif
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vzip2q_f16 (float16x8_t __a, float16x8_t __b)
+{
+#ifdef __AARCH64EB__
+ return __builtin_shuffle (__a, __b,
+ (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
+#else
+ return __builtin_shuffle (__a, __b,
+ (uint16x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
+#endif
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_f32 (float32x4_t __a, float32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25639,7 +30238,8 @@ vzip2q_f32 (float32x4_t __a, float32x4_t __b)
#endif
}
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline float64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_f64 (float64x2_t __a, float64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25649,7 +30249,8 @@ vzip2q_f64 (float64x2_t __a, float64x2_t __b)
#endif
}
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_p8 (poly8x16_t __a, poly8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25661,7 +30262,8 @@ vzip2q_p8 (poly8x16_t __a, poly8x16_t __b)
#endif
}
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline poly16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_p16 (poly16x8_t __a, poly16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25672,7 +30274,8 @@ vzip2q_p16 (poly16x8_t __a, poly16x8_t __b)
#endif
}
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_s8 (int8x16_t __a, int8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25684,7 +30287,8 @@ vzip2q_s8 (int8x16_t __a, int8x16_t __b)
#endif
}
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_s16 (int16x8_t __a, int16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25695,7 +30299,8 @@ vzip2q_s16 (int16x8_t __a, int16x8_t __b)
#endif
}
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_s32 (int32x4_t __a, int32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25705,7 +30310,8 @@ vzip2q_s32 (int32x4_t __a, int32x4_t __b)
#endif
}
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_s64 (int64x2_t __a, int64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25715,7 +30321,8 @@ vzip2q_s64 (int64x2_t __a, int64x2_t __b)
#endif
}
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_u8 (uint8x16_t __a, uint8x16_t __b)
{
#ifdef __AARCH64EB__
@@ -25727,7 +30334,8 @@ vzip2q_u8 (uint8x16_t __a, uint8x16_t __b)
#endif
}
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_u16 (uint16x8_t __a, uint16x8_t __b)
{
#ifdef __AARCH64EB__
@@ -25738,7 +30346,8 @@ vzip2q_u16 (uint16x8_t __a, uint16x8_t __b)
#endif
}
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_u32 (uint32x4_t __a, uint32x4_t __b)
{
#ifdef __AARCH64EB__
@@ -25748,7 +30357,8 @@ vzip2q_u32 (uint32x4_t __a, uint32x4_t __b)
#endif
}
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vzip2q_u64 (uint64x2_t __a, uint64x2_t __b)
{
#ifdef __AARCH64EB__
@@ -25765,9 +30375,1177 @@ __INTERLEAVE_LIST (zip)
/* End of optimal implementations in approved order. */
+#pragma GCC pop_options
+
+/* ARMv8.2-A FP16 intrinsics. */
+
+#include "arm_fp16.h"
+
+#pragma GCC push_options
+#pragma GCC target ("arch=armv8.2-a+fp16")
+
+/* ARMv8.2-A FP16 one operand vector intrinsics. */
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabs_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_absv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabsq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_absv8hf (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqz_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_cmeqv4hf_uss (__a, vdup_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqzq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_cmeqv8hf_uss (__a, vdupq_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgez_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_cmgev4hf_uss (__a, vdup_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgezq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_cmgev8hf_uss (__a, vdupq_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtz_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_cmgtv4hf_uss (__a, vdup_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtzq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_cmgtv8hf_uss (__a, vdupq_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclez_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_cmlev4hf_uss (__a, vdup_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclezq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_cmlev8hf_uss (__a, vdupq_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltz_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_cmltv4hf_uss (__a, vdup_n_f16 (0.0f));
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltzq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_cmltv8hf_uss (__a, vdupq_n_f16 (0.0f));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f16_s16 (int16x4_t __a)
+{
+ return __builtin_aarch64_floatv4hiv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_f16_s16 (int16x8_t __a)
+{
+ return __builtin_aarch64_floatv8hiv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_f16_u16 (uint16x4_t __a)
+{
+ return __builtin_aarch64_floatunsv4hiv4hf ((int16x4_t) __a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_f16_u16 (uint16x8_t __a)
+{
+ return __builtin_aarch64_floatunsv8hiv8hf ((int16x8_t) __a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_s16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lbtruncv4hfv4hi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lbtruncv8hfv8hi (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_u16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lbtruncuv4hfv4hi_us (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lbtruncuv8hfv8hi_us (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvta_s16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lroundv4hfv4hi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtaq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lroundv8hfv8hi (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvta_u16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lrounduv4hfv4hi_us (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtaq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lrounduv8hfv8hi_us (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtm_s16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lfloorv4hfv4hi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtmq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lfloorv8hfv8hi (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtm_u16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lflooruv4hfv4hi_us (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtmq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lflooruv8hfv8hi_us (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtn_s16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lfrintnv4hfv4hi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtnq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lfrintnv8hfv8hi (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtn_u16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lfrintnuv4hfv4hi_us (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtnq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lfrintnuv8hfv8hi_us (__a);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtp_s16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lceilv4hfv4hi (__a);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtpq_s16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lceilv8hfv8hi (__a);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtp_u16_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_lceiluv4hfv4hi_us (__a);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtpq_u16_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_lceiluv8hfv8hi_us (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vneg_f16 (float16x4_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vnegq_f16 (float16x8_t __a)
+{
+ return -__a;
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpe_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_frecpev4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpeq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_frecpev8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrnd_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_btruncv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_btruncv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrnda_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_roundv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndaq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_roundv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndi_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_nearbyintv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndiq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_nearbyintv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndm_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_floorv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndmq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_floorv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndn_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_frintnv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndnq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_frintnv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndp_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_ceilv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndpq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_ceilv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndx_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_rintv4hf (__a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrndxq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_rintv8hf (__a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrte_f16 (float16x4_t a)
+{
+ return __builtin_aarch64_rsqrtev4hf (a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrteq_f16 (float16x8_t a)
+{
+ return __builtin_aarch64_rsqrtev8hf (a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsqrt_f16 (float16x4_t a)
+{
+ return __builtin_aarch64_sqrtv4hf (a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsqrtq_f16 (float16x8_t a)
+{
+ return __builtin_aarch64_sqrtv8hf (a);
+}
+
+/* ARMv8.2-A FP16 two operands vector intrinsics. */
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vadd_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vaddq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabd_f16 (float16x4_t a, float16x4_t b)
+{
+ return __builtin_aarch64_fabdv4hf (a, b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vabdq_f16 (float16x8_t a, float16x8_t b)
+{
+ return __builtin_aarch64_fabdv8hf (a, b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcage_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_facgev4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcageq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_facgev8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcagt_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_facgtv4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcagtq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_facgtv8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcale_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_faclev4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaleq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_faclev8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcalt_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_facltv4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcaltq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_facltv8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceq_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_cmeqv4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vceqq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_cmeqv8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcge_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_cmgev4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgeq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_cmgev8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgt_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_cmgtv4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcgtq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_cmgtv8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcle_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_cmlev4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcleq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_cmlev8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vclt_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_cmltv4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcltq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_cmltv8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f16_s16 (int16x4_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfv4hi (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f16_s16 (int16x8_t __a, const int __b)
+{
+ return __builtin_aarch64_scvtfv8hi (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_f16_u16 (uint16x4_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfv4hi_sus (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_f16_u16 (uint16x8_t __a, const int __b)
+{
+ return __builtin_aarch64_ucvtfv8hi_sus (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_s16_f16 (float16x4_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzsv4hf (__a, __b);
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_s16_f16 (float16x8_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzsv8hf (__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvt_n_u16_f16 (float16x4_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuv4hf_uss (__a, __b);
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vcvtq_n_u16_f16 (float16x8_t __a, const int __b)
+{
+ return __builtin_aarch64_fcvtzuv8hf_uss (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdiv_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vdivq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __a / __b;
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmax_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_smax_nanv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_smax_nanv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnm_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_fmaxv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_fmaxv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmin_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_smin_nanv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_smin_nanv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnm_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_fminv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnmq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_fminv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __a * __b;
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulx_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_fmulxv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulxq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_fmulxv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpadd_f16 (float16x4_t a, float16x4_t b)
+{
+ return __builtin_aarch64_faddpv4hf (a, b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpaddq_f16 (float16x8_t a, float16x8_t b)
+{
+ return __builtin_aarch64_faddpv8hf (a, b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmax_f16 (float16x4_t a, float16x4_t b)
+{
+ return __builtin_aarch64_smax_nanpv4hf (a, b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmaxq_f16 (float16x8_t a, float16x8_t b)
+{
+ return __builtin_aarch64_smax_nanpv8hf (a, b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmaxnm_f16 (float16x4_t a, float16x4_t b)
+{
+ return __builtin_aarch64_smaxpv4hf (a, b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmaxnmq_f16 (float16x8_t a, float16x8_t b)
+{
+ return __builtin_aarch64_smaxpv8hf (a, b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpmin_f16 (float16x4_t a, float16x4_t b)
+{
+ return __builtin_aarch64_smin_nanpv4hf (a, b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpminq_f16 (float16x8_t a, float16x8_t b)
+{
+ return __builtin_aarch64_smin_nanpv8hf (a, b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpminnm_f16 (float16x4_t a, float16x4_t b)
+{
+ return __builtin_aarch64_sminpv4hf (a, b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vpminnmq_f16 (float16x8_t a, float16x8_t b)
+{
+ return __builtin_aarch64_sminpv8hf (a, b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecps_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __builtin_aarch64_frecpsv4hf (__a, __b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrecpsq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __builtin_aarch64_frecpsv8hf (__a, __b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrts_f16 (float16x4_t a, float16x4_t b)
+{
+ return __builtin_aarch64_rsqrtsv4hf (a, b);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vrsqrtsq_f16 (float16x8_t a, float16x8_t b)
+{
+ return __builtin_aarch64_rsqrtsv8hf (a, b);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsub_f16 (float16x4_t __a, float16x4_t __b)
+{
+ return __a - __b;
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vsubq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __a - __b;
+}
+
+/* ARMv8.2-A FP16 three operands vector intrinsics. */
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfma_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c)
+{
+ return __builtin_aarch64_fmav4hf (__b, __c, __a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_aarch64_fmav8hf (__b, __c, __a);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c)
+{
+ return __builtin_aarch64_fnmav4hf (__b, __c, __a);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
+{
+ return __builtin_aarch64_fnmav8hf (__b, __c, __a);
+}
+
+/* ARMv8.2-A FP16 lane vector intrinsics. */
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmah_lane_f16 (float16_t __a, float16_t __b,
+ float16x4_t __c, const int __lane)
+{
+ return vfmah_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmah_laneq_f16 (float16_t __a, float16_t __b,
+ float16x8_t __c, const int __lane)
+{
+ return vfmah_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfma_lane_f16 (float16x4_t __a, float16x4_t __b,
+ float16x4_t __c, const int __lane)
+{
+ return vfma_f16 (__a, __b, __aarch64_vdup_lane_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmaq_lane_f16 (float16x8_t __a, float16x8_t __b,
+ float16x4_t __c, const int __lane)
+{
+ return vfmaq_f16 (__a, __b, __aarch64_vdupq_lane_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfma_laneq_f16 (float16x4_t __a, float16x4_t __b,
+ float16x8_t __c, const int __lane)
+{
+ return vfma_f16 (__a, __b, __aarch64_vdup_laneq_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmaq_laneq_f16 (float16x8_t __a, float16x8_t __b,
+ float16x8_t __c, const int __lane)
+{
+ return vfmaq_f16 (__a, __b, __aarch64_vdupq_laneq_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfma_n_f16 (float16x4_t __a, float16x4_t __b, float16_t __c)
+{
+ return vfma_f16 (__a, __b, vdup_n_f16 (__c));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
+{
+ return vfmaq_f16 (__a, __b, vdupq_n_f16 (__c));
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsh_lane_f16 (float16_t __a, float16_t __b,
+ float16x4_t __c, const int __lane)
+{
+ return vfmsh_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsh_laneq_f16 (float16_t __a, float16_t __b,
+ float16x8_t __c, const int __lane)
+{
+ return vfmsh_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_lane_f16 (float16x4_t __a, float16x4_t __b,
+ float16x4_t __c, const int __lane)
+{
+ return vfms_f16 (__a, __b, __aarch64_vdup_lane_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_lane_f16 (float16x8_t __a, float16x8_t __b,
+ float16x4_t __c, const int __lane)
+{
+ return vfmsq_f16 (__a, __b, __aarch64_vdupq_lane_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_laneq_f16 (float16x4_t __a, float16x4_t __b,
+ float16x8_t __c, const int __lane)
+{
+ return vfms_f16 (__a, __b, __aarch64_vdup_laneq_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_laneq_f16 (float16x8_t __a, float16x8_t __b,
+ float16x8_t __c, const int __lane)
+{
+ return vfmsq_f16 (__a, __b, __aarch64_vdupq_laneq_f16 (__c, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfms_n_f16 (float16x4_t __a, float16x4_t __b, float16_t __c)
+{
+ return vfms_f16 (__a, __b, vdup_n_f16 (__c));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vfmsq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
+{
+ return vfmsq_f16 (__a, __b, vdupq_n_f16 (__c));
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulh_lane_f16 (float16_t __a, float16x4_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_any (__b, __lane);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_lane_f16 (float16x4_t __a, float16x4_t __b, const int __lane)
+{
+ return vmul_f16 (__a, vdup_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __lane)
+{
+ return vmulq_f16 (__a, vdupq_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulh_laneq_f16 (float16_t __a, float16x8_t __b, const int __lane)
+{
+ return __a * __aarch64_vget_lane_any (__b, __lane);
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_laneq_f16 (float16x4_t __a, float16x8_t __b, const int __lane)
+{
+ return vmul_f16 (__a, vdup_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_laneq_f16 (float16x8_t __a, float16x8_t __b, const int __lane)
+{
+ return vmulq_f16 (__a, vdupq_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmul_n_f16 (float16x4_t __a, float16_t __b)
+{
+ return vmul_lane_f16 (__a, vdup_n_f16 (__b), 0);
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return vmulq_laneq_f16 (__a, vdupq_n_f16 (__b), 0);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulxh_lane_f16 (float16_t __a, float16x4_t __b, const int __lane)
+{
+ return vmulxh_f16 (__a, __aarch64_vget_lane_any (__b, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulx_lane_f16 (float16x4_t __a, float16x4_t __b, const int __lane)
+{
+ return vmulx_f16 (__a, __aarch64_vdup_lane_f16 (__b, __lane));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulxq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __lane)
+{
+ return vmulxq_f16 (__a, __aarch64_vdupq_lane_f16 (__b, __lane));
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulxh_laneq_f16 (float16_t __a, float16x8_t __b, const int __lane)
+{
+ return vmulxh_f16 (__a, __aarch64_vget_lane_any (__b, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulx_laneq_f16 (float16x4_t __a, float16x8_t __b, const int __lane)
+{
+ return vmulx_f16 (__a, __aarch64_vdup_laneq_f16 (__b, __lane));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulxq_laneq_f16 (float16x8_t __a, float16x8_t __b, const int __lane)
+{
+ return vmulxq_f16 (__a, __aarch64_vdupq_laneq_f16 (__b, __lane));
+}
+
+__extension__ extern __inline float16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulx_n_f16 (float16x4_t __a, float16_t __b)
+{
+ return vmulx_f16 (__a, vdup_n_f16 (__b));
+}
+
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmulxq_n_f16 (float16x8_t __a, float16_t __b)
+{
+ return vmulxq_f16 (__a, vdupq_n_f16 (__b));
+}
+
+/* ARMv8.2-A FP16 reduction vector intrinsics. */
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxv_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_reduc_smax_nan_scal_v4hf (__a);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxvq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_reduc_smax_nan_scal_v8hf (__a);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminv_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_reduc_smin_nan_scal_v4hf (__a);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminvq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_reduc_smin_nan_scal_v8hf (__a);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnmv_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_reduc_smax_scal_v4hf (__a);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmaxnmvq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_reduc_smax_scal_v8hf (__a);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnmv_f16 (float16x4_t __a)
+{
+ return __builtin_aarch64_reduc_smin_scal_v4hf (__a);
+}
+
+__extension__ extern __inline float16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vminnmvq_f16 (float16x8_t __a)
+{
+ return __builtin_aarch64_reduc_smin_scal_v8hf (__a);
+}
+
+#pragma GCC pop_options
+
#undef __aarch64_vget_lane_any
#undef __aarch64_vdup_lane_any
+#undef __aarch64_vdup_lane_f16
#undef __aarch64_vdup_lane_f32
#undef __aarch64_vdup_lane_f64
#undef __aarch64_vdup_lane_p8
@@ -25780,6 +31558,7 @@ __INTERLEAVE_LIST (zip)
#undef __aarch64_vdup_lane_u16
#undef __aarch64_vdup_lane_u32
#undef __aarch64_vdup_lane_u64
+#undef __aarch64_vdup_laneq_f16
#undef __aarch64_vdup_laneq_f32
#undef __aarch64_vdup_laneq_f64
#undef __aarch64_vdup_laneq_p8
@@ -25792,6 +31571,7 @@ __INTERLEAVE_LIST (zip)
#undef __aarch64_vdup_laneq_u16
#undef __aarch64_vdup_laneq_u32
#undef __aarch64_vdup_laneq_u64
+#undef __aarch64_vdupq_lane_f16
#undef __aarch64_vdupq_lane_f32
#undef __aarch64_vdupq_lane_f64
#undef __aarch64_vdupq_lane_p8
@@ -25804,6 +31584,7 @@ __INTERLEAVE_LIST (zip)
#undef __aarch64_vdupq_lane_u16
#undef __aarch64_vdupq_lane_u32
#undef __aarch64_vdupq_lane_u64
+#undef __aarch64_vdupq_laneq_f16
#undef __aarch64_vdupq_laneq_f32
#undef __aarch64_vdupq_laneq_f64
#undef __aarch64_vdupq_laneq_p8
@@ -25817,6 +31598,4 @@ __INTERLEAVE_LIST (zip)
#undef __aarch64_vdupq_laneq_u32
#undef __aarch64_vdupq_laneq_u64
-#pragma GCC pop_options
-
#endif
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/float.h b/lib/gcc/aarch64-linux-android/6.3.1/include/float.h
index 862f7cc..862f7cc 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/float.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/float.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/iso646.h b/lib/gcc/aarch64-linux-android/6.3.1/include/iso646.h
index 198b344..198b344 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/iso646.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/iso646.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/NXConstStr.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/NXConstStr.h
index e760ad5..e760ad5 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/NXConstStr.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/NXConstStr.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/Object.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/Object.h
index f69f813..f69f813 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/Object.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/Object.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/Protocol.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/Protocol.h
index d58febd..d58febd 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/Protocol.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/Protocol.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/message.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/message.h
index 1f94775..1f94775 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/message.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/message.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-decls.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-decls.h
index 380d9da..380d9da 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-decls.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-decls.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-exception.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-exception.h
index 6a4ab68..6a4ab68 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-exception.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-exception.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-sync.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-sync.h
index 5a8df65..5a8df65 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc-sync.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc-sync.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc.h
index 37391a4..37391a4 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/objc.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/objc.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/runtime.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/runtime.h
index d58bf73..d58bf73 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/runtime.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/runtime.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/thr.h b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/thr.h
index 5dd4fdc..5dd4fdc 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/objc/thr.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/objc/thr.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/omp.h b/lib/gcc/aarch64-linux-android/6.3.1/include/omp.h
index 8026bbb..8026bbb 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/omp.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/omp.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/openacc.h b/lib/gcc/aarch64-linux-android/6.3.1/include/openacc.h
index 7ea8794..7ea8794 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/openacc.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/openacc.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdalign.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdalign.h
index 54a4d71..54a4d71 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdalign.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdalign.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdarg.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdarg.h
index 6525152..6525152 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdarg.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdarg.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdatomic.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdatomic.h
index 0959948..0959948 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdatomic.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdatomic.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdbool.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdbool.h
index dec666a..dec666a 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdbool.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdbool.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stddef.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stddef.h
index d711530..d711530 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stddef.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stddef.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdfix.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdfix.h
index 2659c0f..2659c0f 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdfix.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdfix.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdint-gcc.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdint-gcc.h
index 6d208af..6d208af 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdint-gcc.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdint-gcc.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdint.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdint.h
index 83b6f70..83b6f70 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdint.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdint.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/stdnoreturn.h b/lib/gcc/aarch64-linux-android/6.3.1/include/stdnoreturn.h
index 614265b..614265b 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/stdnoreturn.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/stdnoreturn.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/unwind.h b/lib/gcc/aarch64-linux-android/6.3.1/include/unwind.h
index f2e3ad6..f2e3ad6 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/unwind.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/unwind.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.0/include/varargs.h b/lib/gcc/aarch64-linux-android/6.3.1/include/varargs.h
index 4b9803e..4b9803e 100644
--- a/lib/gcc/aarch64-linux-android/6.3.0/include/varargs.h
+++ b/lib/gcc/aarch64-linux-android/6.3.1/include/varargs.h
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/libcaf_single.a b/lib/gcc/aarch64-linux-android/6.3.1/libcaf_single.a
new file mode 100644
index 0000000..9f88415
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/libcaf_single.a
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/libgcc.a b/lib/gcc/aarch64-linux-android/6.3.1/libgcc.a
new file mode 100644
index 0000000..7cccf0e
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/libgcc.a
Binary files differ
diff --git a/lib/gcc/aarch64-linux-android/6.3.1/libgcov.a b/lib/gcc/aarch64-linux-android/6.3.1/libgcov.a
new file mode 100644
index 0000000..c47445e
--- /dev/null
+++ b/lib/gcc/aarch64-linux-android/6.3.1/libgcov.a
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/plugin/gengtype b/libexec/gcc/aarch64-linux-android/6.3.0/plugin/gengtype
deleted file mode 100755
index 5f64bb6..0000000
--- a/libexec/gcc/aarch64-linux-android/6.3.0/plugin/gengtype
+++ /dev/null
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/cc1obj b/libexec/gcc/aarch64-linux-android/6.3.1/cc1
index 0fac5f3..c257e91 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/cc1obj
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/cc1
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/cc1 b/libexec/gcc/aarch64-linux-android/6.3.1/cc1obj
index e6e9061..af7c429 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/cc1
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/cc1obj
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/cc1plus b/libexec/gcc/aarch64-linux-android/6.3.1/cc1objplus
index a34986a..e3fc497 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/cc1plus
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/cc1objplus
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/cc1objplus b/libexec/gcc/aarch64-linux-android/6.3.1/cc1plus
index 344a679..711c0d1 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/cc1objplus
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/cc1plus
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/collect2 b/libexec/gcc/aarch64-linux-android/6.3.1/collect2
index 4c805b5..e6297b1 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/collect2
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/collect2
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/f951 b/libexec/gcc/aarch64-linux-android/6.3.1/f951
index 7dabcde..af7d83d 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/f951
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/f951
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so b/libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so
index f25ba88..f25ba88 120000
--- a/libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so.0 b/libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so.0
index f25ba88..f25ba88 120000
--- a/libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so.0
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so.0
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so.0.0.0 b/libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so.0.0.0
index d729363..d729363 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/liblto_plugin.so.0.0.0
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/liblto_plugin.so.0.0.0
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/lto-wrapper b/libexec/gcc/aarch64-linux-android/6.3.1/lto-wrapper
index 588a41e..29d8876 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/lto-wrapper
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/lto-wrapper
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.0/lto1 b/libexec/gcc/aarch64-linux-android/6.3.1/lto1
index 24e991e..1a17401 100755
--- a/libexec/gcc/aarch64-linux-android/6.3.0/lto1
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/lto1
Binary files differ
diff --git a/libexec/gcc/aarch64-linux-android/6.3.1/plugin/gengtype b/libexec/gcc/aarch64-linux-android/6.3.1/plugin/gengtype
new file mode 100755
index 0000000..58b5af4
--- /dev/null
+++ b/libexec/gcc/aarch64-linux-android/6.3.1/plugin/gengtype
Binary files differ
diff --git a/repo.prop b/repo.prop
index 48e7c4b..4cd9f14 100644
--- a/repo.prop
+++ b/repo.prop
@@ -1,18 +1,18 @@
-platform/bionic fb07c36bc061db4ca5d8348ff6bc1e60b6c53191
-platform/development 06eb77a682183d6407046975f48e0c699f6abc3d
+platform/bionic 8d0e0d495209f3bdc20edf0e5c2aa219b4cbe217
+platform/development 3646941fc6fa6c781ccc75f763ec049d92d0d3f1
platform/external/googletest 90bc60c06ca961116d13a0e07bf945067b61c011
platform/external/libcxx 637261e03552862801e9c1f19a777b02f6025271
platform/external/libcxxabi 7e39fcaaf104bde12d62a4577af9d22f8081f242
platform/external/libunwind_llvm fecc6f26cfdbfc9cf0ea2021629ac6e85b7c0113
-platform/external/llvm 4a5fef40724e2e9af15b9fc8ed87abc25682a20f
-platform/external/shaderc/glslang dbd09e0be713eaab4feba755471a32edc4d203f4
-platform/external/shaderc/shaderc 55ef1954a6cff487b08a83e89fd20570d744828b
+platform/external/llvm 9677b84d5af6168ddcc40b6b85ece3361248c956
+platform/external/shaderc/glslang 3817e324b6c4a72b0c5fe096c6a865d5fc93e3d7
+platform/external/shaderc/shaderc 753e1962e17086dba783fd67c750dfbcac5871c8
platform/external/shaderc/spirv-headers cea581f35f25531b412b6bba1462319824bee404
-platform/external/shaderc/spirv-tools eec320fe2ae3baf0af7cc47d867fe25a76138c4e
+platform/external/shaderc/spirv-tools 016fdf77183ed931a7c9d002ee9a4c81861ebf15
platform/external/vulkan-validation-layers ea04d8a697116044ba085f678ec98438883bfc7c
-platform/ndk 35d03f2af41ec1adf0069386af23b7e4172d472f
+platform/ndk e8cac067d4b3bc19511e6a91ddefb88b928bad41
platform/prebuilts/clang/host/darwin-x86 73e58d09a20724718e3c963ea5867835c43a7544
-platform/prebuilts/clang/host/linux-x86 bc5d76c6d04bfccd099c40cf19e8e9ad51e9ef4e
+platform/prebuilts/clang/host/linux-x86 8eb2cea18ef8672eafca64c10b64c1fa681ad07c
platform/prebuilts/clang/host/windows-x86 d56c012cc7dfe258810ace806eb4bf4757af1882
platform/prebuilts/cmake/darwin-x86 289589930105f9db21313bfd5aaa4a5fe02509e5
platform/prebuilts/cmake/linux-x86 ee96b2ec399702e23faee15863fed3ae33144fdd
@@ -31,15 +31,15 @@ platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 73ca99196723f81
platform/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8 47200031477701f568a772368e3f6c8ce49fe05c
platform/prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9 a332df8dadd78866c2a57f2c7348d499952939c2
platform/prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 1944f44167a907969de5138fe4cc442432b7cc37
-platform/prebuilts/ndk 242e8e6d4928f3f9e1caca33ce441c88e8a4dc3c
+platform/prebuilts/ndk 255b9ba334c3fe6925a34cf47b2afaf3b33c4426
platform/prebuilts/ninja/darwin-x86 00f798346dedb4a7a6a6dcc9ad32ff09d66ee0db
platform/prebuilts/ninja/linux-x86 6369b19fc3fbe765636af75d394627e2b92599ed
platform/prebuilts/python/darwin-x86/2.7.5 0c5958b1636c47ed7c284f859c8e805fd06a0e63
platform/prebuilts/python/linux-x86/2.7.5 3229c5a24529bad2a85691f17d9e75a300af5085
platform/prebuilts/renderscript/host/darwin-x86 a0ede5664b4741348c0b6c8d5da06d483dcf2876
platform/prebuilts/renderscript/host/linux-x86 bad4a4a7b6a3d0e77bfb2e30c43f68a3f681d245
-platform/prebuilts/renderscript/host/windows-x86 52dc95db19940892624a8a19061e3f6f764758fc
-platform/prebuilts/simpleperf 67b5f82a399fba110faf781a8be55aaaa0f21084
+platform/prebuilts/renderscript/host/windows-x86 5df9f20565e63906167c82f6120c78e969b3b467
+platform/prebuilts/simpleperf 6e25cdb1167d8cb9cc84ddc8624f9f25272c214e
toolchain/binutils 249bab53c3518fccd432baf1447422b25d884732
toolchain/build f280657461aee54b6d2807881d8a77832f4e794c
toolchain/cloog 604793eab97d360aef729f064674569ee6dbf3e1
@@ -53,5 +53,5 @@ toolchain/mpfr de979fc377db766591e7feaf052f0de59be46e76
toolchain/ppl 979062d362bc5a1c00804237b408b19b4618fb24
toolchain/python 6a7fc9bfd21da85dda97a8bcd2952e0bfbded424
toolchain/sed 45df23d6dc8b51ea5cd903d023c10fd7d72415b9
-toolchain/xz 595407f5a237e9bfd6821d70096d38825ec9c4e0
+toolchain/xz a0eb1f5763e7b4a3daf4fd7d1ac9504058cc1082
toolchain/yasm a159fe073809b4138cf90b7298ea31ea17af85c0