author     Jeff Davidson <jpd@google.com>  2014-10-06 17:10:21 -0700
committer  Jeff Davidson <jpd@google.com>  2014-10-06 17:10:21 -0700
commit     e01d63c7f5bc4e47d62014ebaabf1ba036ad7c73 (patch)
tree       e8e42b2a116955aadaef65a8befd7a02b731d1ec
parent     c03a283c8ef41c1534e7f101a34702dba8c422e0 (diff)
download   22-darwin-e01d63c7f5bc4e47d62014ebaabf1ba036ad7c73.tar.gz
Import L Build Tools from git_lmp-mr1-release build 1491975
http://android-build/builds/git_lmp-mr1-release-mac-sdk_phone_mips-sdk/1491975/sdk-repo-darwin-build-tools-1491975.zip

source.properties has been modified to make this appear as API 22.

Change-Id: I213fafce405fcbfe45ab89b0ac0bfa7d99669464
-rwxr-xr-x  aapt  bin  1306792 -> 1371948 bytes
-rwxr-xr-x  aidl  bin  285356 -> 285356 bytes
-rwxr-xr-x  bcc_compat  bin  36604 -> 36240 bytes
-rwxr-xr-x  dexdump  bin  151252 -> 151632 bytes
-rw-r--r--  lib/++.dylib  bin  0 -> 1086100 bytes
-rw-r--r--  lib/LVM.dylib  bin  0 -> 22963148 bytes
-rw-r--r--  lib/cc.dylib  bin  0 -> 246140 bytes
-rw-r--r--  lib/cinfo.dylib  bin  0 -> 680188 bytes
-rw-r--r--  lib/dx.jar  bin  950890 -> 958047 bytes
-rw-r--r--  lib/lang.dylib  bin  0 -> 15638204 bytes
-rwxr-xr-x  libLLVM.dylib  bin  21589424 -> 22963148 bytes
-rwxr-xr-x  libbcc.dylib  bin  243076 -> 246140 bytes
-rwxr-xr-x  libbcinfo.dylib  bin  684816 -> 680188 bytes
-rw-r--r--  libc++.dylib  bin  0 -> 1086100 bytes
-rwxr-xr-x  libclang.dylib  bin  15066256 -> 15638204 bytes
-rwxr-xr-x  llvm-rs-cc  bin  1958840 -> 2010840 bytes
-rw-r--r--  renderscript/clang-include/CMakeLists.txt  1
-rw-r--r--  renderscript/clang-include/Intrin.h  46
-rw-r--r--  renderscript/clang-include/altivec.h  583
-rw-r--r--  renderscript/clang-include/arm_acle.h  151
-rw-r--r--  renderscript/clang-include/avxintrin.h  9
-rw-r--r--  renderscript/clang-include/bmiintrin.h  35
-rw-r--r--  renderscript/clang-include/ia32intrin.h  5
-rw-r--r--  renderscript/clang-include/stdarg.h  2
-rw-r--r--  renderscript/clang-include/x86intrin.h  2
-rw-r--r--  renderscript/clang-include/xmmintrin.h  6
-rw-r--r--  renderscript/include/rs_allocation.rsh  2
-rw-r--r--  renderscript/include/rs_core_math.rsh  2124
-rw-r--r--  renderscript/include/rs_matrix.rsh  308
-rw-r--r--  renderscript/include/rs_time.rsh  4
-rw-r--r--  renderscript/include/rs_types.rsh  12
-rw-r--r--  renderscript/lib/bc/armeabi-v7a/libclcore.bc  bin  218080 -> 235932 bytes
-rw-r--r--  renderscript/lib/bc/mips/libclcore.bc  bin  308296 -> 235932 bytes
-rw-r--r--  renderscript/lib/bc/x86/libclcore.bc  bin  215864 -> 232168 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/intermediates/armeabi-v7a/libc.so  bin  481290 -> 601616 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/intermediates/armeabi-v7a/libm.so  bin  91404 -> 95504 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/intermediates/mips/libc.so  bin  764091 -> 930771 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/intermediates/mips/libm.so  bin  136868 -> 145140 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/intermediates/x86/libc.so  bin  864112 -> 995537 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/intermediates/x86/libm.so  bin  128216 -> 132360 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/packaged/armeabi-v7a/libRSSupport.so  bin  383220 -> 415936 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/packaged/armeabi-v7a/librsjni.so  bin  18560 -> 18560 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/packaged/mips/libRSSupport.so  bin  536900 -> 583320 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/packaged/mips/librsjni.so  bin  71868 -> 71924 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/packaged/x86/libRSSupport.so  bin  489604 -> 514128 bytes
-rw-r--r-- [-rwxr-xr-x]  renderscript/lib/packaged/x86/librsjni.so  bin  26636 -> 26636 bytes
-rw-r--r--  renderscript/lib/renderscript-v8.jar  bin  144982 -> 146272 bytes
-rw-r--r--  source.properties  2
-rwxr-xr-x  zipalign  bin  75536 -> 106384 bytes
49 files changed, 2703 insertions, 589 deletions
diff --git a/aapt b/aapt
index 4a903f5..37548f0 100755
--- a/aapt
+++ b/aapt
Binary files differ
diff --git a/aidl b/aidl
index 29bb250..7532292 100755
--- a/aidl
+++ b/aidl
Binary files differ
diff --git a/bcc_compat b/bcc_compat
index a073600..3f92f76 100755
--- a/bcc_compat
+++ b/bcc_compat
Binary files differ
diff --git a/dexdump b/dexdump
index 5fd2197..45f24a4 100755
--- a/dexdump
+++ b/dexdump
Binary files differ
diff --git a/lib/++.dylib b/lib/++.dylib
new file mode 100644
index 0000000..837c0dc
--- /dev/null
+++ b/lib/++.dylib
Binary files differ
diff --git a/lib/LVM.dylib b/lib/LVM.dylib
new file mode 100644
index 0000000..1617b05
--- /dev/null
+++ b/lib/LVM.dylib
Binary files differ
diff --git a/lib/cc.dylib b/lib/cc.dylib
new file mode 100644
index 0000000..6114ee0
--- /dev/null
+++ b/lib/cc.dylib
Binary files differ
diff --git a/lib/cinfo.dylib b/lib/cinfo.dylib
new file mode 100644
index 0000000..82e2970
--- /dev/null
+++ b/lib/cinfo.dylib
Binary files differ
diff --git a/lib/dx.jar b/lib/dx.jar
index 103f662..f456278 100644
--- a/lib/dx.jar
+++ b/lib/dx.jar
Binary files differ
diff --git a/lib/lang.dylib b/lib/lang.dylib
new file mode 100644
index 0000000..341e7fc
--- /dev/null
+++ b/lib/lang.dylib
Binary files differ
diff --git a/libLLVM.dylib b/libLLVM.dylib
index 6e0934b..1617b05 100755
--- a/libLLVM.dylib
+++ b/libLLVM.dylib
Binary files differ
diff --git a/libbcc.dylib b/libbcc.dylib
index bff079b..6114ee0 100755
--- a/libbcc.dylib
+++ b/libbcc.dylib
Binary files differ
diff --git a/libbcinfo.dylib b/libbcinfo.dylib
index a2e1a9f..82e2970 100755
--- a/libbcinfo.dylib
+++ b/libbcinfo.dylib
Binary files differ
diff --git a/libc++.dylib b/libc++.dylib
new file mode 100644
index 0000000..837c0dc
--- /dev/null
+++ b/libc++.dylib
Binary files differ
diff --git a/libclang.dylib b/libclang.dylib
index 7fd3f87..341e7fc 100755
--- a/libclang.dylib
+++ b/libclang.dylib
Binary files differ
diff --git a/llvm-rs-cc b/llvm-rs-cc
index 24628a1..cc589b6 100755
--- a/llvm-rs-cc
+++ b/llvm-rs-cc
Binary files differ
diff --git a/renderscript/clang-include/CMakeLists.txt b/renderscript/clang-include/CMakeLists.txt
index 23b2446..edee7d7 100644
--- a/renderscript/clang-include/CMakeLists.txt
+++ b/renderscript/clang-include/CMakeLists.txt
@@ -1,6 +1,7 @@
set(files
altivec.h
ammintrin.h
+ arm_acle.h
avxintrin.h
avx2intrin.h
bmiintrin.h
diff --git a/renderscript/clang-include/Intrin.h b/renderscript/clang-include/Intrin.h
index ff6d278..13e105e 100644
--- a/renderscript/clang-include/Intrin.h
+++ b/renderscript/clang-include/Intrin.h
@@ -30,21 +30,27 @@
#define __INTRIN_H
/* First include the standard intrinsics. */
+#if defined(__i386__) || defined(__x86_64__)
#include <x86intrin.h>
+#endif
/* For the definition of jmp_buf. */
+#if __STDC_HOSTED__
#include <setjmp.h>
+#endif
#ifdef __cplusplus
extern "C" {
#endif
+#if defined(__MMX__)
/* And the random ones that aren't in those files. */
__m64 _m_from_float(float);
__m64 _m_from_int(int _l);
void _m_prefetch(void *);
float _m_to_float(__m64);
int _m_to_int(__m64 _M);
+#endif
/* Other assorted instruction intrinsics. */
void __addfsbyte(unsigned long, unsigned char);
@@ -223,8 +229,7 @@ static __inline__
long __cdecl _InterlockedDecrement(long volatile *_Addend);
static __inline__
short _InterlockedDecrement16(short volatile *_Addend);
-static __inline__
-long __cdecl _InterlockedExchange(long volatile *_Target, long _Value);
+long _InterlockedExchange(long volatile *_Target, long _Value);
static __inline__
short _InterlockedExchange16(short volatile *_Target, short _Value);
static __inline__
@@ -288,7 +293,9 @@ unsigned __int64 __cdecl _rotr64(unsigned __int64 _Value, int _Shift);
static __inline__
unsigned char _rotr8(unsigned char _Value, unsigned char _Shift);
int _sarx_i32(int, unsigned int);
+#if __STDC_HOSTED__
int __cdecl _setjmp(jmp_buf);
+#endif
unsigned int _shlx_u32(unsigned int, unsigned int);
unsigned int _shrx_u32(unsigned int, unsigned int);
void _Store_HLERelease(long volatile *, long);
@@ -411,7 +418,6 @@ __int64 _InterlockedCompareExchange64_HLERelease(__int64 volatile *, __int64,
__int64);
__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand);
-static __inline__
void *_InterlockedCompareExchangePointer(void *volatile *_Destination,
void *_Exchange, void *_Comparand);
void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
@@ -422,7 +428,6 @@ static __inline__
__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
static __inline__
__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
-static __inline__
void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
static __inline__
__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
@@ -448,8 +453,9 @@ unsigned int __cdecl _readgsbase_u32(void);
unsigned __int64 __cdecl _readgsbase_u64(void);
unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
__int64 _sarx_i64(__int64, unsigned int);
-/* FIXME: Need definition for jmp_buf.
- int __cdecl _setjmpex(jmp_buf); */
+#if __STDC_HOSTED__
+int __cdecl _setjmpex(jmp_buf);
+#endif
unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
unsigned __int64 shrx_u64(unsigned __int64, unsigned int);
unsigned __int64 _tzcnt_u64(unsigned __int64);
@@ -575,6 +581,7 @@ _bittestandset(long *a, long b) {
*a = *a | (1 << b);
return x;
}
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_interlockedbittestandset(long volatile *__BitBase, long __BitPos) {
unsigned char __Res;
@@ -585,6 +592,7 @@ _interlockedbittestandset(long volatile *__BitBase, long __BitPos) {
: "Ir"(__BitPos));
return __Res;
}
+#endif
#ifdef __x86_64__
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
@@ -785,22 +793,12 @@ _InterlockedExchange16(short volatile *_Target, short _Value) {
__atomic_exchange(_Target, &_Value, &_Value, 0);
return _Value;
}
-static __inline__ long __attribute__((__always_inline__, __nodebug__))
-_InterlockedExchange(long volatile *_Target, long _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, 0);
- return _Value;
-}
#ifdef __x86_64__
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
__atomic_exchange(_Target, &_Value, &_Value, 0);
return _Value;
}
-static __inline__ void *__attribute__((__always_inline__, __nodebug__))
-_InterlockedExchangePointer(void *volatile *_Target, void *_Value) {
- __atomic_exchange(_Target, &_Value, &_Value, 0);
- return _Value;
-}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Compare Exchange
@@ -817,14 +815,6 @@ _InterlockedCompareExchange16(short volatile *_Destination,
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
return _Comparand;
}
-#ifdef __x86_64__
-static __inline__ void *__attribute__((__always_inline__, __nodebug__))
-_InterlockedCompareExchangePointer(void *volatile *_Destination,
- void *_Exchange, void *_Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0, 0, 0);
- return _Comparand;
-}
-#endif
static __inline__ __int64 __attribute__((__always_inline__, __nodebug__))
_InterlockedCompareExchange64(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand) {
@@ -834,6 +824,7 @@ _InterlockedCompareExchange64(__int64 volatile *_Destination,
/*----------------------------------------------------------------------------*\
|* Barriers
\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
_ReadWriteBarrier(void) {
@@ -849,6 +840,7 @@ __attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
_WriteBarrier(void) {
__asm__ volatile ("" : : : "memory");
}
+#endif
#ifdef __x86_64__
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__faststorefence(void) {
@@ -904,6 +896,7 @@ __readgsword(unsigned long __offset) {
/*----------------------------------------------------------------------------*\
|* movs, stos
\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
__asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
@@ -934,6 +927,7 @@ __stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
__asm__("rep stosh" : : "D"(__dst), "a"(__x), "c"(__n)
: "%edi", "%ecx");
}
+#endif
#ifdef __x86_64__
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
@@ -958,6 +952,7 @@ static __inline__ void * __attribute__((__always_inline__, __nodebug__))
_ReturnAddress(void) {
return __builtin_return_address(0);
}
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__cpuid(int __info[4], int __level) {
__asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
@@ -978,10 +973,12 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__))
__halt(void) {
__asm__ volatile ("hlt");
}
+#endif
/*----------------------------------------------------------------------------*\
|* Privileged intrinsics
\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
__readmsr(unsigned long __register) {
// Loads the contents of a 64-bit model specific register (MSR) specified in
@@ -1007,6 +1004,7 @@ static __inline__ void __attribute__((always_inline, __nodebug__))
__writecr3(unsigned int __cr3_val) {
__asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
}
+#endif
#ifdef __cplusplus
}
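
The Intrin.h hunks above mostly fence x86-only pieces (<x86intrin.h>, the MMX declarations, the "rep movs"/"cpuid"/MSR inline assembly and the compiler barriers) behind architecture checks so the header still parses when targeting ARM. A minimal sketch of that guard pattern follows; the helper name and body are illustrative assumptions, not part of Intrin.h:

#if defined(__i386__) || defined(__x86_64__)
/* Illustrative only: x86 inline assembly is compiled solely for x86 targets,
 * mirroring the guards added in the diff above. */
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__example_spin_hint(void) {
  __asm__ volatile ("pause");   /* x86 instruction; unavailable on ARM */
}
#endif
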
diff --git a/renderscript/clang-include/altivec.h b/renderscript/clang-include/altivec.h
index 74ce08a..7a4a774 100644
--- a/renderscript/clang-include/altivec.h
+++ b/renderscript/clang-include/altivec.h
@@ -73,6 +73,9 @@ vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
static vector float __ATTRS_o_ai
vec_perm(vector float __a, vector float __b, vector unsigned char __c);
+static vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char __a, vector unsigned char __b);
+
/* vec_abs */
#define __builtin_altivec_abs_v16qi vec_abs
@@ -3485,30 +3488,49 @@ vec_mtvscr(vector float __a)
__builtin_altivec_mtvscr((vector int)__a);
}
+/* The vmulos* and vmules* instructions have a big endian bias, so
+ we must reverse the meaning of "even" and "odd" for little endian. */
+
/* vec_mule */
static vector short __ATTRS_o_ai
vec_mule(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
return __builtin_altivec_vmulesb(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_mule(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
return __builtin_altivec_vmuleub(__a, __b);
+#endif
}
static vector int __ATTRS_o_ai
vec_mule(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
return __builtin_altivec_vmulesh(__a, __b);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_mule(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
return __builtin_altivec_vmuleuh(__a, __b);
+#endif
}
/* vec_vmulesb */
@@ -3516,7 +3538,11 @@ vec_mule(vector unsigned short __a, vector unsigned short __b)
static vector short __attribute__((__always_inline__))
vec_vmulesb(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosb(__a, __b);
+#else
return __builtin_altivec_vmulesb(__a, __b);
+#endif
}
/* vec_vmuleub */
@@ -3524,7 +3550,11 @@ vec_vmulesb(vector signed char __a, vector signed char __b)
static vector unsigned short __attribute__((__always_inline__))
vec_vmuleub(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuloub(__a, __b);
+#else
return __builtin_altivec_vmuleub(__a, __b);
+#endif
}
/* vec_vmulesh */
@@ -3532,7 +3562,11 @@ vec_vmuleub(vector unsigned char __a, vector unsigned char __b)
static vector int __attribute__((__always_inline__))
vec_vmulesh(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulosh(__a, __b);
+#else
return __builtin_altivec_vmulesh(__a, __b);
+#endif
}
/* vec_vmuleuh */
@@ -3540,7 +3574,11 @@ vec_vmulesh(vector short __a, vector short __b)
static vector unsigned int __attribute__((__always_inline__))
vec_vmuleuh(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulouh(__a, __b);
+#else
return __builtin_altivec_vmuleuh(__a, __b);
+#endif
}
/* vec_mulo */
@@ -3548,25 +3586,41 @@ vec_vmuleuh(vector unsigned short __a, vector unsigned short __b)
static vector short __ATTRS_o_ai
vec_mulo(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
return __builtin_altivec_vmulosb(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_mulo(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
return __builtin_altivec_vmuloub(__a, __b);
+#endif
}
static vector int __ATTRS_o_ai
vec_mulo(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
return __builtin_altivec_vmulosh(__a, __b);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_mulo(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
return __builtin_altivec_vmulouh(__a, __b);
+#endif
}
/* vec_vmulosb */
@@ -3574,7 +3628,11 @@ vec_mulo(vector unsigned short __a, vector unsigned short __b)
static vector short __attribute__((__always_inline__))
vec_vmulosb(vector signed char __a, vector signed char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesb(__a, __b);
+#else
return __builtin_altivec_vmulosb(__a, __b);
+#endif
}
/* vec_vmuloub */
@@ -3582,7 +3640,11 @@ vec_vmulosb(vector signed char __a, vector signed char __b)
static vector unsigned short __attribute__((__always_inline__))
vec_vmuloub(vector unsigned char __a, vector unsigned char __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleub(__a, __b);
+#else
return __builtin_altivec_vmuloub(__a, __b);
+#endif
}
/* vec_vmulosh */
@@ -3590,7 +3652,11 @@ vec_vmuloub(vector unsigned char __a, vector unsigned char __b)
static vector int __attribute__((__always_inline__))
vec_vmulosh(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmulesh(__a, __b);
+#else
return __builtin_altivec_vmulosh(__a, __b);
+#endif
}
/* vec_vmulouh */
@@ -3598,7 +3664,11 @@ vec_vmulosh(vector short __a, vector short __b)
static vector unsigned int __attribute__((__always_inline__))
vec_vmulouh(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vmuleuh(__a, __b);
+#else
return __builtin_altivec_vmulouh(__a, __b);
+#endif
}
/* vec_nmsub */
@@ -4047,52 +4117,91 @@ vec_vor(vector float __a, vector bool int __b)
/* vec_pack */
+/* The various vector pack instructions have a big-endian bias, so for
+ little endian we must handle reversed element numbering. */
+
static vector signed char __ATTRS_o_ai
vec_pack(vector signed short __a, vector signed short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_pack(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector bool char __ATTRS_o_ai
vec_pack(vector bool short __a, vector bool short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector short __ATTRS_o_ai
vec_pack(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_pack(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector bool short __ATTRS_o_ai
vec_pack(vector bool int __a, vector bool int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
/* vec_vpkuhum */
@@ -4102,25 +4211,43 @@ vec_pack(vector bool int __a, vector bool int __b)
static vector signed char __ATTRS_o_ai
vec_vpkuhum(vector signed short __a, vector signed short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector signed char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_vpkuhum(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector unsigned char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
static vector bool char __ATTRS_o_ai
vec_vpkuhum(vector bool short __a, vector bool short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
+#else
return (vector bool char)vec_perm(__a, __b, (vector unsigned char)
(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+#endif
}
/* vec_vpkuwum */
@@ -4130,25 +4257,43 @@ vec_vpkuhum(vector bool short __a, vector bool short __b)
static vector short __ATTRS_o_ai
vec_vpkuwum(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_vpkuwum(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector unsigned short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
static vector bool short __ATTRS_o_ai
vec_vpkuwum(vector bool int __a, vector bool int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
+ (0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
+ 0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
+#else
return (vector bool short)vec_perm(__a, __b, (vector unsigned char)
(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+#endif
}
/* vec_packpx */
@@ -4156,7 +4301,11 @@ vec_vpkuwum(vector bool int __a, vector bool int __b)
static vector pixel __attribute__((__always_inline__))
vec_packpx(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
}
/* vec_vpkpx */
@@ -4164,7 +4313,11 @@ vec_packpx(vector unsigned int __a, vector unsigned int __b)
static vector pixel __attribute__((__always_inline__))
vec_vpkpx(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
+#else
return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
+#endif
}
/* vec_packs */
@@ -4172,25 +4325,41 @@ vec_vpkpx(vector unsigned int __a, vector unsigned int __b)
static vector signed char __ATTRS_o_ai
vec_packs(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
return __builtin_altivec_vpkshss(__a, __b);
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_packs(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
static vector signed short __ATTRS_o_ai
vec_packs(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
return __builtin_altivec_vpkswss(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_packs(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_vpkshss */
@@ -4198,7 +4367,11 @@ vec_packs(vector unsigned int __a, vector unsigned int __b)
static vector signed char __attribute__((__always_inline__))
vec_vpkshss(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshss(__b, __a);
+#else
return __builtin_altivec_vpkshss(__a, __b);
+#endif
}
/* vec_vpkuhus */
@@ -4206,7 +4379,11 @@ vec_vpkshss(vector short __a, vector short __b)
static vector unsigned char __attribute__((__always_inline__))
vec_vpkuhus(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
/* vec_vpkswss */
@@ -4214,7 +4391,11 @@ vec_vpkuhus(vector unsigned short __a, vector unsigned short __b)
static vector signed short __attribute__((__always_inline__))
vec_vpkswss(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswss(__b, __a);
+#else
return __builtin_altivec_vpkswss(__a, __b);
+#endif
}
/* vec_vpkuwus */
@@ -4222,7 +4403,11 @@ vec_vpkswss(vector int __a, vector int __b)
static vector unsigned short __attribute__((__always_inline__))
vec_vpkuwus(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_packsu */
@@ -4230,25 +4415,41 @@ vec_vpkuwus(vector unsigned int __a, vector unsigned int __b)
static vector unsigned char __ATTRS_o_ai
vec_packsu(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
return __builtin_altivec_vpkshus(__a, __b);
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_packsu(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_packsu(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
return __builtin_altivec_vpkswus(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_packsu(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_vpkshus */
@@ -4256,13 +4457,21 @@ vec_packsu(vector unsigned int __a, vector unsigned int __b)
static vector unsigned char __ATTRS_o_ai
vec_vpkshus(vector short __a, vector short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkshus(__b, __a);
+#else
return __builtin_altivec_vpkshus(__a, __b);
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_vpkshus(vector unsigned short __a, vector unsigned short __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuhus(__b, __a);
+#else
return __builtin_altivec_vpkuhus(__a, __b);
+#endif
}
/* vec_vpkswus */
@@ -4270,22 +4479,46 @@ vec_vpkshus(vector unsigned short __a, vector unsigned short __b)
static vector unsigned short __ATTRS_o_ai
vec_vpkswus(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkswus(__b, __a);
+#else
return __builtin_altivec_vpkswus(__a, __b);
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_vpkswus(vector unsigned int __a, vector unsigned int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vpkuwus(__b, __a);
+#else
return __builtin_altivec_vpkuwus(__a, __b);
+#endif
}
/* vec_perm */
+// The vperm instruction is defined architecturally with a big-endian bias.
+// For little endian, we swap the input operands and invert the permute
+// control vector. Only the rightmost 5 bits matter, so we could use
+// a vector of all 31s instead of all 255s to perform the inversion.
+// However, when the PCV is not a constant, using 255 has an advantage
+// in that the vec_xor can be recognized as a vec_nor (and for P8 and
+// later, possibly a vec_nand).
+
vector signed char __ATTRS_o_ai
vec_perm(vector signed char __a, vector signed char __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector signed char)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector signed char)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector unsigned char __ATTRS_o_ai
@@ -4293,22 +4526,46 @@ vec_perm(vector unsigned char __a,
vector unsigned char __b,
vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned char)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector unsigned char)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector bool char __ATTRS_o_ai
vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector bool char)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector bool char)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector short __ATTRS_o_ai
vec_perm(vector short __a, vector short __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector short)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector short)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector unsigned short __ATTRS_o_ai
@@ -4316,49 +4573,104 @@ vec_perm(vector unsigned short __a,
vector unsigned short __b,
vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned short)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector unsigned short)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector bool short __ATTRS_o_ai
vec_perm(vector bool short __a, vector bool short __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector bool short)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector bool short)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector pixel __ATTRS_o_ai
vec_perm(vector pixel __a, vector pixel __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector pixel)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector pixel)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector int __ATTRS_o_ai
vec_perm(vector int __a, vector int __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector int)__builtin_altivec_vperm_4si(__b, __a, __d);
+#else
return (vector int)__builtin_altivec_vperm_4si(__a, __b, __c);
+#endif
}
vector unsigned int __ATTRS_o_ai
vec_perm(vector unsigned int __a, vector unsigned int __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector unsigned int)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector unsigned int)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector bool int __ATTRS_o_ai
vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector bool int)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector bool int)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
vector float __ATTRS_o_ai
vec_perm(vector float __a, vector float __b, vector unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __d = {255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255};
+ __d = vec_xor(__c, __d);
+ return (vector float)
+ __builtin_altivec_vperm_4si((vector int)__b, (vector int)__a, __d);
+#else
return (vector float)
__builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+#endif
}
/* vec_vperm */
@@ -4366,8 +4678,7 @@ vec_perm(vector float __a, vector float __b, vector unsigned char __c)
static vector signed char __ATTRS_o_ai
vec_vperm(vector signed char __a, vector signed char __b, vector unsigned char __c)
{
- return (vector signed char)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector unsigned char __ATTRS_o_ai
@@ -4375,22 +4686,19 @@ vec_vperm(vector unsigned char __a,
vector unsigned char __b,
vector unsigned char __c)
{
- return (vector unsigned char)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector bool char __ATTRS_o_ai
vec_vperm(vector bool char __a, vector bool char __b, vector unsigned char __c)
{
- return (vector bool char)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector short __ATTRS_o_ai
vec_vperm(vector short __a, vector short __b, vector unsigned char __c)
{
- return (vector short)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector unsigned short __ATTRS_o_ai
@@ -4398,49 +4706,43 @@ vec_vperm(vector unsigned short __a,
vector unsigned short __b,
vector unsigned char __c)
{
- return (vector unsigned short)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector bool short __ATTRS_o_ai
vec_vperm(vector bool short __a, vector bool short __b, vector unsigned char __c)
{
- return (vector bool short)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector pixel __ATTRS_o_ai
vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c)
{
- return (vector pixel)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector int __ATTRS_o_ai
vec_vperm(vector int __a, vector int __b, vector unsigned char __c)
{
- return (vector int)__builtin_altivec_vperm_4si(__a, __b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector unsigned int __ATTRS_o_ai
vec_vperm(vector unsigned int __a, vector unsigned int __b, vector unsigned char __c)
{
- return (vector unsigned int)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector bool int __ATTRS_o_ai
vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c)
{
- return (vector bool int)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
static vector float __ATTRS_o_ai
vec_vperm(vector float __a, vector float __b, vector unsigned char __c)
{
- return (vector float)
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__b, __c);
+ return vec_perm(__a, __b, __c);
}
/* vec_re */
@@ -4922,65 +5224,113 @@ vec_vslw(vector unsigned int __a, vector unsigned int __b)
static vector signed char __ATTRS_o_ai
vec_sld(vector signed char __a, vector signed char __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_sld(vector unsigned char __a, vector unsigned char __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector short __ATTRS_o_ai
vec_sld(vector short __a, vector short __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_sld(vector unsigned short __a, vector unsigned short __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector pixel __ATTRS_o_ai
vec_sld(vector pixel __a, vector pixel __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector int __ATTRS_o_ai
vec_sld(vector int __a, vector int __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_sld(vector unsigned int __a, vector unsigned int __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector float __ATTRS_o_ai
vec_sld(vector float __a, vector float __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
/* vec_vsldoi */
@@ -4988,65 +5338,113 @@ vec_sld(vector float __a, vector float __b, unsigned char __c)
static vector signed char __ATTRS_o_ai
vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector unsigned char __ATTRS_o_ai
vec_vsldoi(vector unsigned char __a, vector unsigned char __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector short __ATTRS_o_ai
vec_vsldoi(vector short __a, vector short __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector unsigned short __ATTRS_o_ai
vec_vsldoi(vector unsigned short __a, vector unsigned short __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector pixel __ATTRS_o_ai
vec_vsldoi(vector pixel __a, vector pixel __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector int __ATTRS_o_ai
vec_vsldoi(vector int __a, vector int __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_vsldoi(vector unsigned int __a, vector unsigned int __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
static vector float __ATTRS_o_ai
vec_vsldoi(vector float __a, vector float __b, unsigned char __c)
{
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(__a, __b, (vector unsigned char)
+ (__c, __c-1, __c-2, __c-3, __c-4, __c-5, __c-6, __c-7,
+ __c-8, __c-9, __c-10, __c-11, __c-12, __c-13, __c-14, __c-15));
+#else
return vec_perm(__a, __b, (vector unsigned char)
(__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
__c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15));
+#endif
}
/* vec_sll */
@@ -8054,10 +8452,26 @@ vec_vsum4shs(vector signed short __a, vector int __b)
/* vec_sum2s */
+/* The vsum2sws instruction has a big-endian bias, so that the second
+ input vector and the result always reference big-endian elements
+ 1 and 3 (little-endian element 0 and 2). For ease of porting the
+ programmer wants elements 1 and 3 in both cases, so for little
+ endian we must perform some permutes. */
+
static vector signed int __attribute__((__always_inline__))
vec_sum2s(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)
+ vec_perm(__c, __c, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+#else
return __builtin_altivec_vsum2sws(__a, __b);
+#endif
}
/* vec_vsum2sws */
@@ -8065,15 +8479,41 @@ vec_sum2s(vector int __a, vector int __b)
static vector signed int __attribute__((__always_inline__))
vec_vsum2sws(vector int __a, vector int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ vector int __c = (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+ __c = __builtin_altivec_vsum2sws(__a, __c);
+ return (vector signed int)
+ vec_perm(__c, __c, (vector unsigned char)
+ (4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11));
+#else
return __builtin_altivec_vsum2sws(__a, __b);
+#endif
}
/* vec_sums */
+/* The vsumsws instruction has a big-endian bias, so that the second
+ input vector and the result always reference big-endian element 3
+ (little-endian element 0). For ease of porting the programmer
+ wants element 3 in both cases, so for little endian we must perform
+ some permutes. */
+
static vector signed int __attribute__((__always_inline__))
vec_sums(vector signed int __a, vector signed int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11));
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3));
+#else
return __builtin_altivec_vsumsws(__a, __b);
+#endif
}
/* vec_vsumsws */
@@ -8081,7 +8521,17 @@ vec_sums(vector signed int __a, vector signed int __b)
static vector signed int __attribute__((__always_inline__))
vec_vsumsws(vector signed int __a, vector signed int __b)
{
+#ifdef __LITTLE_ENDIAN__
+ __b = (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,11));
+ __b = __builtin_altivec_vsumsws(__a, __b);
+ return (vector signed int)
+ vec_perm(__b, __b, (vector unsigned char)
+ (4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3));
+#else
return __builtin_altivec_vsumsws(__a, __b);
+#endif
}
/* vec_trunc */
@@ -8102,34 +8552,57 @@ vec_vrfiz(vector float __a)
/* vec_unpackh */
+/* The vector unpack instructions all have a big-endian bias, so for
+ little endian we must reverse the meanings of "high" and "low." */
+
static vector short __ATTRS_o_ai
vec_unpackh(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_unpackh(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
static vector int __ATTRS_o_ai
vec_unpackh(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
return __builtin_altivec_vupkhsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_unpackh(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_unpackh(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupkhsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
}
/* vec_vupkhsb */
@@ -8137,13 +8610,21 @@ vec_unpackh(vector pixel __a)
static vector short __ATTRS_o_ai
vec_vupkhsb(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsb((vector char)__a);
+#else
return __builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_vupkhsb(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#endif
}
/* vec_vupkhsh */
@@ -8151,19 +8632,31 @@ vec_vupkhsb(vector bool char __a)
static vector int __ATTRS_o_ai
vec_vupkhsh(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupklsh(__a);
+#else
return __builtin_altivec_vupkhsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_vupkhsh(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_vupkhsh(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupkhsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#endif
}
/* vec_unpackl */
@@ -8171,31 +8664,51 @@ vec_vupkhsh(vector pixel __a)
static vector short __ATTRS_o_ai
vec_unpackl(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
return __builtin_altivec_vupklsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_unpackl(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
}
static vector int __ATTRS_o_ai
vec_unpackl(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
return __builtin_altivec_vupklsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_unpackl(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_unpackl(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupklsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
}
/* vec_vupklsb */
@@ -8203,13 +8716,21 @@ vec_unpackl(vector pixel __a)
static vector short __ATTRS_o_ai
vec_vupklsb(vector signed char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsb((vector char)__a);
+#else
return __builtin_altivec_vupklsb((vector char)__a);
+#endif
}
static vector bool short __ATTRS_o_ai
vec_vupklsb(vector bool char __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
+#else
return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
+#endif
}
/* vec_vupklsh */
@@ -8217,19 +8738,31 @@ vec_vupklsb(vector bool char __a)
static vector int __ATTRS_o_ai
vec_vupklsh(vector short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vupkhsh(__a);
+#else
return __builtin_altivec_vupklsh(__a);
+#endif
}
static vector bool int __ATTRS_o_ai
vec_vupklsh(vector bool short __a)
{
+#ifdef __LITTLE_ENDIAN__
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
+#else
return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
+#endif
}
static vector unsigned int __ATTRS_o_ai
vec_vupklsh(vector pixel __a)
{
- return (vector unsigned int)__builtin_altivec_vupklsh((vector short)__a);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
+#else
+ return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
+#endif
}
/* vec_xor */
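
The vec_perm comment above notes that only the low 5 bits of each permute-control byte matter, which is why XOR-ing the control vector with all 255s (rather than all 31s) still mirrors the element selection once the two input operands are swapped for little endian. A standalone sketch of that identity, written as ordinary scalar C rather than AltiVec code (an assumption for illustration only):

#include <assert.h>

int main(void) {
  /* For any 5-bit element index c, flipping all 8 bits and masking back to
   * 5 bits yields the mirrored index 31 - c, which is the element the
   * swapped-operand vperm must pick. */
  for (unsigned c = 0; c < 32; ++c)
    assert(((c ^ 0xFFu) & 0x1Fu) == 31u - c);
  return 0;
}
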
diff --git a/renderscript/clang-include/arm_acle.h b/renderscript/clang-include/arm_acle.h
new file mode 100644
index 0000000..d706745
--- /dev/null
+++ b/renderscript/clang-include/arm_acle.h
@@ -0,0 +1,151 @@
+/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_ACLE_H
+#define __ARM_ACLE_H
+
+#ifndef __ARM_ACLE
+#error "ACLE intrinsics support not enabled."
+#endif
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Miscellaneous data-processing intrinsics */
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __clz(uint32_t t) {
+ return __builtin_clz(t);
+}
+
+static __inline__ unsigned long __attribute__((always_inline, nodebug))
+ __clzl(unsigned long t) {
+ return __builtin_clzl(t);
+}
+
+static __inline__ uint64_t __attribute__((always_inline, nodebug))
+ __clzll(uint64_t t) {
+#if __SIZEOF_LONG_LONG__ == 8
+ return __builtin_clzll(t);
+#else
+ return __builtin_clzl(t);
+#endif
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __rev(uint32_t t) {
+ return __builtin_bswap32(t);
+}
+
+static __inline__ unsigned long __attribute__((always_inline, nodebug))
+ __revl(unsigned long t) {
+#if __SIZEOF_LONG__ == 4
+ return __builtin_bswap32(t);
+#else
+ return __builtin_bswap64(t);
+#endif
+}
+
+static __inline__ uint64_t __attribute__((always_inline, nodebug))
+ __revll(uint64_t t) {
+ return __builtin_bswap64(t);
+}
+
+
+/*
+ * Saturating intrinsics
+ *
+ * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
+ * intrinsics are implemented and the flag is enabled.
+ */
+#if __ARM_32BIT_STATE
+#define __ssat(x, y) __builtin_arm_ssat(x, y)
+#define __usat(x, y) __builtin_arm_usat(x, y)
+
+static __inline__ int32_t __attribute__((always_inline, nodebug))
+ __qadd(int32_t t, int32_t v) {
+ return __builtin_arm_qadd(t, v);
+}
+
+static __inline__ int32_t __attribute__((always_inline, nodebug))
+ __qsub(int32_t t, int32_t v) {
+ return __builtin_arm_qsub(t, v);
+}
+
+static __inline__ int32_t __attribute__((always_inline, nodebug))
+__qdbl(int32_t t) {
+ return __builtin_arm_qadd(t, t);
+}
+#endif
+
+/* CRC32 intrinsics */
+#if __ARM_FEATURE_CRC32
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32b(uint32_t a, uint8_t b) {
+ return __builtin_arm_crc32b(a, b);
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32h(uint32_t a, uint16_t b) {
+ return __builtin_arm_crc32h(a, b);
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32w(uint32_t a, uint32_t b) {
+ return __builtin_arm_crc32w(a, b);
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32d(uint32_t a, uint64_t b) {
+ return __builtin_arm_crc32d(a, b);
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32cb(uint32_t a, uint8_t b) {
+ return __builtin_arm_crc32cb(a, b);
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32ch(uint32_t a, uint16_t b) {
+ return __builtin_arm_crc32ch(a, b);
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32cw(uint32_t a, uint32_t b) {
+ return __builtin_arm_crc32cw(a, b);
+}
+
+static __inline__ uint32_t __attribute__((always_inline, nodebug))
+ __crc32cd(uint32_t a, uint64_t b) {
+ return __builtin_arm_crc32cd(a, b);
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __ARM_ACLE_H */
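A rough usage sketch for the new header, assuming an ARM target where __ARM_ACLE is predefined (and __ARM_FEATURE_CRC32 for the CRC32 part):

#include <stdint.h>
#include <arm_acle.h>

/* Count leading zeros and byte-reverse a 32-bit word. */
uint32_t leading_zeros(uint32_t x) { return __clz(x); }
uint32_t byte_swap(uint32_t x)     { return __rev(x); }

#if __ARM_FEATURE_CRC32
/* Fold one byte into a running CRC-32C accumulator. */
uint32_t crc32c_step(uint32_t crc, uint8_t byte) {
    return __crc32cb(crc, byte);
}
#endif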
diff --git a/renderscript/clang-include/avxintrin.h b/renderscript/clang-include/avxintrin.h
index 3d50439..4e1044a 100644
--- a/renderscript/clang-include/avxintrin.h
+++ b/renderscript/clang-include/avxintrin.h
@@ -737,19 +737,22 @@ _mm256_zeroupper(void)
static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_broadcast_ss(float const *__a)
{
- return (__m128)__builtin_ia32_vbroadcastss(__a);
+ float __f = *__a;
+ return (__m128)(__v4sf){ __f, __f, __f, __f };
}
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_sd(double const *__a)
{
- return (__m256d)__builtin_ia32_vbroadcastsd256(__a);
+ double __d = *__a;
+ return (__m256d)(__v4df){ __d, __d, __d, __d };
}
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_ss(float const *__a)
{
- return (__m256)__builtin_ia32_vbroadcastss256(__a);
+ float __f = *__a;
+ return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
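The broadcast changes above drop the __builtin_ia32_vbroadcast* calls in favor of a plain scalar load splatted through a vector literal; the public API is unchanged. A small caller sketch, assuming AVX is enabled (e.g. -mavx):

#include <immintrin.h>

/* Splat one float across all eight lanes of a 256-bit vector. */
__m256 splat8(const float *p) {
    return _mm256_broadcast_ss(p);
}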
diff --git a/renderscript/clang-include/bmiintrin.h b/renderscript/clang-include/bmiintrin.h
index 8cb00f5..43c4a5e 100644
--- a/renderscript/clang-include/bmiintrin.h
+++ b/renderscript/clang-include/bmiintrin.h
@@ -32,6 +32,14 @@
#ifndef __BMIINTRIN_H
#define __BMIINTRIN_H
+#define _tzcnt_u16(a) (__tzcnt_u16((a)))
+#define _andn_u32(a, b) (__andn_u32((a), (b)))
+/* _bextr_u32 != __bextr_u32 */
+#define _blsi_u32(a) (__blsi_u32((a)))
+#define _blsmsk_u32(a) (__blsmsk_u32((a)))
+#define _blsr_u32(a) (__blsr_u32((a)))
+#define _tzcnt_u32(a) (__tzcnt_u32((a)))
+
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__tzcnt_u16(unsigned short __X)
{
@@ -44,12 +52,20 @@ __andn_u32(unsigned int __X, unsigned int __Y)
return ~__X & __Y;
}
+/* AMD-specified, double-leading-underscore version of BEXTR */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__bextr_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_bextr_u32(__X, __Y);
}
+/* Intel-specified, single-leading-underscore version of BEXTR */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__blsi_u32(unsigned int __X)
{
@@ -75,18 +91,34 @@ __tzcnt_u32(unsigned int __X)
}
#ifdef __x86_64__
+
+#define _andn_u64(a, b) (__andn_u64((a), (b)))
+/* _bextr_u64 != __bextr_u64 */
+#define _blsi_u64(a) (__blsi_u64((a)))
+#define _blsmsk_u64(a) (__blsmsk_u64((a)))
+#define _blsr_u64(a) (__blsr_u64((a)))
+#define _tzcnt_u64(a) (__tzcnt_u64((a)))
+
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__andn_u64 (unsigned long long __X, unsigned long long __Y)
{
return ~__X & __Y;
}
+/* AMD-specified, double-leading-underscore version of BEXTR */
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__bextr_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_bextr_u64(__X, __Y);
}
+/* Intel-specified, single-leading-underscore version of BEXTR */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
+{
+ return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
+}
+
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__blsi_u64(unsigned long long __X)
{
@@ -110,6 +142,7 @@ __tzcnt_u64(unsigned long long __X)
{
return __builtin_ctzll(__X);
}
-#endif
+
+#endif /* __x86_64__ */
#endif /* __BMIINTRIN_H */
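The two BEXTR spellings in this header differ only in how the bit field is described: the double-underscore AMD form takes one packed control word (start in bits 7:0, length in bits 15:8), while the new single-underscore Intel form takes start and length separately and packs them itself, as its body shows. A hedged sketch, assuming BMI is enabled (e.g. -mbmi):

#include <x86intrin.h>

/* Extract the 8-bit field starting at bit 4, both ways. */
unsigned int extract_intel(unsigned int x) {
    return _bextr_u32(x, 4, 8);            /* start, length */
}
unsigned int extract_amd(unsigned int x) {
    return __bextr_u32(x, (8u << 8) | 4u); /* length<<8 | start */
}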
diff --git a/renderscript/clang-include/ia32intrin.h b/renderscript/clang-include/ia32intrin.h
index 55c2247..5adf3f1 100644
--- a/renderscript/clang-include/ia32intrin.h
+++ b/renderscript/clang-include/ia32intrin.h
@@ -79,6 +79,11 @@ __writeeflags(unsigned int __f)
}
#endif /* !__x86_64__ */
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__rdpmc(int __A) {
+ return __builtin_ia32_rdpmc(__A);
+}
+
/* __rdtsc */
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__rdtsc(void) {
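The new __rdpmc wrapper issues RDPMC for the selected counter. Whether that is usable from user space depends on the OS (the instruction faults unless user-mode counter reads are permitted), so treat this purely as a sketch:

#include <x86intrin.h>

/* Read performance-monitoring counter number idx. */
unsigned long long read_pmc(int idx) {
    return __rdpmc(idx);
}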
diff --git a/renderscript/clang-include/stdarg.h b/renderscript/clang-include/stdarg.h
index 6110a06..a57e183 100644
--- a/renderscript/clang-include/stdarg.h
+++ b/renderscript/clang-include/stdarg.h
@@ -44,7 +44,9 @@ typedef __builtin_va_list va_list;
#endif
/* Hack required to make standard headers work, at least on Ubuntu */
+#ifndef __GNUC_VA_LIST
#define __GNUC_VA_LIST 1
+#endif
typedef __builtin_va_list __gnuc_va_list;
#endif /* __STDARG_H */
diff --git a/renderscript/clang-include/x86intrin.h b/renderscript/clang-include/x86intrin.h
index be9e71d..21a43da 100644
--- a/renderscript/clang-include/x86intrin.h
+++ b/renderscript/clang-include/x86intrin.h
@@ -76,6 +76,6 @@
#include <f16cintrin.h>
#endif
-// FIXME: LWP
+/* FIXME: LWP */
#endif /* __X86INTRIN_H */
diff --git a/renderscript/clang-include/xmmintrin.h b/renderscript/clang-include/xmmintrin.h
index e777ec0..c9befcb 100644
--- a/renderscript/clang-include/xmmintrin.h
+++ b/renderscript/clang-include/xmmintrin.h
@@ -34,8 +34,8 @@ typedef int __v4si __attribute__((__vector_size__(16)));
typedef float __v4sf __attribute__((__vector_size__(16)));
typedef float __m128 __attribute__((__vector_size__(16)));
-// This header should only be included in a hosted environment as it depends on
-// a standard library to provide allocation routines.
+/* This header should only be included in a hosted environment as it depends on
+ * a standard library to provide allocation routines. */
#if __STDC_HOSTED__
#include <mm_malloc.h>
#endif
@@ -589,7 +589,7 @@ _mm_set1_ps(float __w)
return (__m128){ __w, __w, __w, __w };
}
-// Microsoft specific.
+/* Microsoft specific. */
static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set_ps1(float __w)
{
diff --git a/renderscript/include/rs_allocation.rsh b/renderscript/include/rs_allocation.rsh
index 773c386..6f3f8d9 100644
--- a/renderscript/include/rs_allocation.rsh
+++ b/renderscript/include/rs_allocation.rsh
@@ -375,7 +375,7 @@ extern const uchar __attribute__((overloadable))
#endif // (defined(RS_VERSION) && (RS_VERSION >= 18))
-#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+#if (defined(RS_VERSION) && (RS_VERSION >= 999))
#define VOP(T) \
extern T __attribute__((overloadable)) \
diff --git a/renderscript/include/rs_core_math.rsh b/renderscript/include/rs_core_math.rsh
index a72e1ac..287a1b9 100644
--- a/renderscript/include/rs_core_math.rsh
+++ b/renderscript/include/rs_core_math.rsh
@@ -456,7 +456,7 @@ extern float4 __attribute__((const, overloadable))atan2pi(float4 y, float4 x);
*
* Supported by API versions 9 and newer.
*/
-extern float __attribute__((const, overloadable))atanh(float);
+extern float __attribute__((const, overloadable))atanh(float v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
@@ -465,7 +465,7 @@ extern float __attribute__((const, overloadable))atanh(float);
*
* Supported by API versions 9 and newer.
*/
-extern float2 __attribute__((const, overloadable))atanh(float2);
+extern float2 __attribute__((const, overloadable))atanh(float2 v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
@@ -474,7 +474,7 @@ extern float2 __attribute__((const, overloadable))atanh(float2);
*
* Supported by API versions 9 and newer.
*/
-extern float3 __attribute__((const, overloadable))atanh(float3);
+extern float3 __attribute__((const, overloadable))atanh(float3 v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
@@ -483,7 +483,7 @@ extern float3 __attribute__((const, overloadable))atanh(float3);
*
* Supported by API versions 9 and newer.
*/
-extern float4 __attribute__((const, overloadable))atanh(float4);
+extern float4 __attribute__((const, overloadable))atanh(float4 v);
#endif
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
@@ -2952,1379 +2952,1379 @@ extern uint3 __attribute__((const, overloadable))convert_uint3(uint3 v);
extern uint4 __attribute__((const, overloadable))convert_uint4(uint4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to float2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float2 __attribute__((const, overloadable))convert_float2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to float3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float3 __attribute__((const, overloadable))convert_float3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to float4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float4 __attribute__((const, overloadable))convert_float4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to float2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float2 __attribute__((const, overloadable))convert_float2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to float3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float3 __attribute__((const, overloadable))convert_float3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to float4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float4 __attribute__((const, overloadable))convert_float4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to float2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float2 __attribute__((const, overloadable))convert_float2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to float3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float3 __attribute__((const, overloadable))convert_float3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to float4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float4 __attribute__((const, overloadable))convert_float4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to char2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char2 __attribute__((const, overloadable))convert_char2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to char3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char3 __attribute__((const, overloadable))convert_char3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to char4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char4 __attribute__((const, overloadable))convert_char4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to char2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char2 __attribute__((const, overloadable))convert_char2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to char3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char3 __attribute__((const, overloadable))convert_char3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to char4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char4 __attribute__((const, overloadable))convert_char4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to char2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char2 __attribute__((const, overloadable))convert_char2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to char3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char3 __attribute__((const, overloadable))convert_char3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to char4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char4 __attribute__((const, overloadable))convert_char4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to uchar2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar2 __attribute__((const, overloadable))convert_uchar2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to uchar3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar3 __attribute__((const, overloadable))convert_uchar3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to uchar4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar4 __attribute__((const, overloadable))convert_uchar4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to uchar2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar2 __attribute__((const, overloadable))convert_uchar2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to uchar3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar3 __attribute__((const, overloadable))convert_uchar3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to uchar4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar4 __attribute__((const, overloadable))convert_uchar4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to uchar2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar2 __attribute__((const, overloadable))convert_uchar2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to uchar3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar3 __attribute__((const, overloadable))convert_uchar3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to uchar4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar4 __attribute__((const, overloadable))convert_uchar4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to short2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short2 __attribute__((const, overloadable))convert_short2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to short3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short3 __attribute__((const, overloadable))convert_short3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to short4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short4 __attribute__((const, overloadable))convert_short4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to short2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short2 __attribute__((const, overloadable))convert_short2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to short3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short3 __attribute__((const, overloadable))convert_short3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to short4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short4 __attribute__((const, overloadable))convert_short4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to short2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short2 __attribute__((const, overloadable))convert_short2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to short3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short3 __attribute__((const, overloadable))convert_short3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to short4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short4 __attribute__((const, overloadable))convert_short4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to ushort2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort2 __attribute__((const, overloadable))convert_ushort2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to ushort3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort3 __attribute__((const, overloadable))convert_ushort3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to ushort4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort4 __attribute__((const, overloadable))convert_ushort4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to ushort2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort2 __attribute__((const, overloadable))convert_ushort2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to ushort3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort3 __attribute__((const, overloadable))convert_ushort3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to ushort4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort4 __attribute__((const, overloadable))convert_ushort4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to ushort2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort2 __attribute__((const, overloadable))convert_ushort2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to ushort3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort3 __attribute__((const, overloadable))convert_ushort3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to ushort4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort4 __attribute__((const, overloadable))convert_ushort4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to int2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int2 __attribute__((const, overloadable))convert_int2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to int3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int3 __attribute__((const, overloadable))convert_int3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to int4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int4 __attribute__((const, overloadable))convert_int4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to int2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int2 __attribute__((const, overloadable))convert_int2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to int3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int3 __attribute__((const, overloadable))convert_int3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to int4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int4 __attribute__((const, overloadable))convert_int4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to int2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int2 __attribute__((const, overloadable))convert_int2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to int3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int3 __attribute__((const, overloadable))convert_int3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to int4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int4 __attribute__((const, overloadable))convert_int4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double2 to uint2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint2 __attribute__((const, overloadable))convert_uint2(double2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double3 to uint3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint3 __attribute__((const, overloadable))convert_uint3(double3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from double4 to uint4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint4 __attribute__((const, overloadable))convert_uint4(double4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long2 to uint2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint2 __attribute__((const, overloadable))convert_uint2(long2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long3 to uint3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint3 __attribute__((const, overloadable))convert_uint3(long3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from long4 to uint4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint4 __attribute__((const, overloadable))convert_uint4(long4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong2 to uint2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint2 __attribute__((const, overloadable))convert_uint2(ulong2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong3 to uint3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint3 __attribute__((const, overloadable))convert_uint3(ulong3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ulong4 to uint4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint4 __attribute__((const, overloadable))convert_uint4(ulong4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(char2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(char3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(char4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(uchar2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(uchar3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(uchar4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(short2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(short3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(short4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(ushort2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(ushort3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(ushort4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(int2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(int3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(int4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint2 to double2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double2 __attribute__((const, overloadable))convert_double2(uint2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint3 to double3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double3 __attribute__((const, overloadable))convert_double3(uint3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint4 to double4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern double4 __attribute__((const, overloadable))convert_double4(uint4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(char2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(char3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(char4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(uchar2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(uchar3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(uchar4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(short2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(short3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(short4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(ushort2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(ushort3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(ushort4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(int2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(int3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(int4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint2 to long2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))convert_long2(uint2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint3 to long3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))convert_long3(uint3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint4 to long4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))convert_long4(uint4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from float4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(float4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(char2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(char3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from char4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(char4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(uchar2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(uchar3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uchar4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(uchar4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(short2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(short3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from short4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(short4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(ushort2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(ushort3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from ushort4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(ushort4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(int2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(int3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from int4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(int4 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint2 to ulong2
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))convert_ulong2(uint2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint3 to ulong3
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))convert_ulong3(uint3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Component wise conversion from uint4 to ulong4
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))convert_ulong4(uint4 v);
#endif
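
(Illustrative sketch, not part of this change: the blocks above gate each widening conversion on RS_VERSION >= 21, so a script only sees them when compiled against API 21 or newer. Assuming a script targeting API 21, a mapping kernel could call one of these overloads directly; the pragmas and the package name below are placeholders.)

    #pragma version(1)
    #pragma rs java_package_name(com.example.rs)   // hypothetical package name

    // Component-wise widening of each uint4 element to ulong4,
    // available only when RS_VERSION >= 21.
    ulong4 __attribute__((kernel)) widen(uint4 in) {
        return convert_ulong4(in);
    }
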
@@ -5667,7 +5667,7 @@ extern float __attribute__((const, overloadable))length(float4 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the log gamma
*
* Supported by API versions 9 and newer.
*/
@@ -5676,7 +5676,7 @@ extern float __attribute__((const, overloadable))lgamma(float);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the log gamma
*
* Supported by API versions 9 and newer.
*/
@@ -5685,7 +5685,7 @@ extern float2 __attribute__((const, overloadable))lgamma(float2);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the log gamma
*
* Supported by API versions 9 and newer.
*/
@@ -5694,7 +5694,7 @@ extern float3 __attribute__((const, overloadable))lgamma(float3);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the log gamma and sign
+ * Return the log gamma
*
* Supported by API versions 9 and newer.
*/
@@ -6325,290 +6325,290 @@ static uint4 __attribute__((const, overloadable))max(uint4 v1, uint4 v2) {
}
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char __attribute__((const, overloadable))max(char v1, char v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char2 __attribute__((const, overloadable))max(char2 v1, char2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char3 __attribute__((const, overloadable))max(char3 v1, char3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char4 __attribute__((const, overloadable))max(char4 v1, char4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar __attribute__((const, overloadable))max(uchar v1, uchar v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar2 __attribute__((const, overloadable))max(uchar2 v1, uchar2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar3 __attribute__((const, overloadable))max(uchar3 v1, uchar3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar4 __attribute__((const, overloadable))max(uchar4 v1, uchar4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short __attribute__((const, overloadable))max(short v1, short v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short2 __attribute__((const, overloadable))max(short2 v1, short2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short3 __attribute__((const, overloadable))max(short3 v1, short3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short4 __attribute__((const, overloadable))max(short4 v1, short4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort __attribute__((const, overloadable))max(ushort v1, ushort v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort2 __attribute__((const, overloadable))max(ushort2 v1, ushort2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort3 __attribute__((const, overloadable))max(ushort3 v1, ushort3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort4 __attribute__((const, overloadable))max(ushort4 v1, ushort4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int __attribute__((const, overloadable))max(int v1, int v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int2 __attribute__((const, overloadable))max(int2 v1, int2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int3 __attribute__((const, overloadable))max(int3 v1, int3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int4 __attribute__((const, overloadable))max(int4 v1, int4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint __attribute__((const, overloadable))max(uint v1, uint v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint2 __attribute__((const, overloadable))max(uint2 v1, uint2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint3 __attribute__((const, overloadable))max(uint3 v1, uint3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint4 __attribute__((const, overloadable))max(uint4 v1, uint4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long __attribute__((const, overloadable))max(long v1, long v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))max(long2 v1, long2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))max(long3 v1, long3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))max(long4 v1, long4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong __attribute__((const, overloadable))max(ulong v1, ulong v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))max(ulong2 v1, ulong2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))max(ulong3 v1, ulong3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the maximum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))max(ulong4 v1, ulong4 v2);
#endif
@@ -6985,290 +6985,290 @@ static uint4 __attribute__((const, overloadable))min(uint4 v1, uint4 v2) {
}
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char __attribute__((const, overloadable))min(char v1, char v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char2 __attribute__((const, overloadable))min(char2 v1, char2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char3 __attribute__((const, overloadable))min(char3 v1, char3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern char4 __attribute__((const, overloadable))min(char4 v1, char4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar __attribute__((const, overloadable))min(uchar v1, uchar v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar2 __attribute__((const, overloadable))min(uchar2 v1, uchar2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar3 __attribute__((const, overloadable))min(uchar3 v1, uchar3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uchar4 __attribute__((const, overloadable))min(uchar4 v1, uchar4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short __attribute__((const, overloadable))min(short v1, short v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short2 __attribute__((const, overloadable))min(short2 v1, short2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short3 __attribute__((const, overloadable))min(short3 v1, short3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern short4 __attribute__((const, overloadable))min(short4 v1, short4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort __attribute__((const, overloadable))min(ushort v1, ushort v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort2 __attribute__((const, overloadable))min(ushort2 v1, ushort2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort3 __attribute__((const, overloadable))min(ushort3 v1, ushort3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ushort4 __attribute__((const, overloadable))min(ushort4 v1, ushort4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int __attribute__((const, overloadable))min(int v1, int v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int2 __attribute__((const, overloadable))min(int2 v1, int2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int3 __attribute__((const, overloadable))min(int3 v1, int3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern int4 __attribute__((const, overloadable))min(int4 v1, int4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint __attribute__((const, overloadable))min(uint v1, uint v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint2 __attribute__((const, overloadable))min(uint2 v1, uint2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint3 __attribute__((const, overloadable))min(uint3 v1, uint3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern uint4 __attribute__((const, overloadable))min(uint4 v1, uint4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long __attribute__((const, overloadable))min(long v1, long v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long2 __attribute__((const, overloadable))min(long2 v1, long2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long3 __attribute__((const, overloadable))min(long3 v1, long3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern long4 __attribute__((const, overloadable))min(long4 v1, long4 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong __attribute__((const, overloadable))min(ulong v1, ulong v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong2 __attribute__((const, overloadable))min(ulong2 v1, ulong2 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong3 __attribute__((const, overloadable))min(ulong3 v1, ulong3 v2);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* Return the minimum value from two arguments
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern ulong4 __attribute__((const, overloadable))min(ulong4 v1, ulong4 v2);
#endif
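
(Illustrative sketch, not part of this change: after this change the integer overloads of max and min above are guarded on API 21, but call sites look the same. A small helper clamping a long into a range, assuming API 21; the function name is arbitrary.)

    // Clamp a long value into [lo, hi] with the API 21 integer overloads.
    static long clamp_long(long v, long lo, long hi) {
        return min(max(v, lo), hi);
    }
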
@@ -7397,6 +7397,618 @@ extern float4 __attribute__((overloadable))modf(float4 x, float4* iret);
extern float __attribute__((const, overloadable))nan(uint);
#endif
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_acos(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_acos(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_acos(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_acos(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_acosh(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_acosh(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_acosh(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_acosh(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_acospi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_acospi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_acospi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse cosine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_acospi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_asin(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_asin(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_asin(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_asin(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_asinh(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_asinh(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_asinh(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic sine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_asinh(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_asinpi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_asinpi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_asinpi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse sine divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_asinpi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_atan(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_atan(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_atan(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_atan(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_atan2(float y, float x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_atan2(float2 y, float2 x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_atan2(float3 y, float3 x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_atan2(float4 y, float4 x);
+#endif
+
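
(Illustrative sketch, not part of this change: native_atan2 trades accuracy for speed when recovering an angle from Cartesian components. Assuming a script targeting API 21, a per-element kernel might look like the following; the name polar_angle is arbitrary.)

    // Approximate polar angle, in radians, of each (x, y) sample.
    float __attribute__((kernel)) polar_angle(float2 v) {
        return native_atan2(v.y, v.x);
    }
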
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x, divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_atan2pi(float y, float x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x, divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_atan2pi(float2 y, float2 x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x, divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_atan2pi(float3 y, float3 x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent of y / x, divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_atan2pi(float4 y, float4 x);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_atanh(float in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_atanh(float2 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_atanh(float3 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse hyperbolic tangent.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_atanh(float4 in);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_atanpi(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_atanpi(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_atanpi(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the inverse tangent divided by PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_atanpi(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cube root.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_cbrt(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cube root.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_cbrt(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cube root.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_cbrt(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cube root.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_cbrt(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_cos(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_cos(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_cos(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_cos(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_cosh(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_cosh(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_cosh(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic cosine.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_cosh(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine of the value * PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_cospi(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine of the value * PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_cospi(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine of the value * PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_cospi(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the cosine of the value * PI.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_cospi(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_distance(float lhs, float rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_distance(float2 lhs, float2 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_distance(float3 lhs, float3 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate distance between two points.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_distance(float4 lhs, float4 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate division result of two values.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_divide(float lhs, float rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate division result of two values.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_divide(float2 lhs, float2 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate division result of two values.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_divide(float3 lhs, float3 rhs);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate division result of two values.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_divide(float4 lhs, float4 rhs);
+#endif
+
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate exp
@@ -7529,9 +8141,118 @@ extern float3 __attribute__((const, overloadable))native_exp2(float3 v);
extern float4 __attribute__((const, overloadable))native_exp2(float4 v);
#endif
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (e ^ value) - 1.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_expm1(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (e ^ value) - 1.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_expm1(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (e ^ value) - 1.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_expm1(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (e ^ value) - 1.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_expm1(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return native_sqrt(x*x + y*y)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_hypot(float x, float y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return native_sqrt(x*x + y*y)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_hypot(float2 x, float2 y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return native_sqrt(x*x + y*y)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_hypot(float3 x, float3 y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return native_sqrt(x*x + y*y)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_hypot(float4 x, float4 y);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_length(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_length(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_length(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the approximate length of a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_length(float4 v);
+#endif
+
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7541,6 +8262,7 @@ extern float __attribute__((const, overloadable))native_log(float v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7550,6 +8272,7 @@ extern float2 __attribute__((const, overloadable))native_log(float2 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7559,6 +8282,7 @@ extern float3 __attribute__((const, overloadable))native_log(float3 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7568,6 +8292,7 @@ extern float4 __attribute__((const, overloadable))native_log(float4 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log10
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7577,6 +8302,7 @@ extern float __attribute__((const, overloadable))native_log10(float v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log10
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7586,6 +8312,7 @@ extern float2 __attribute__((const, overloadable))native_log10(float2 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log10
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7595,15 +8322,53 @@ extern float3 __attribute__((const, overloadable))native_log10(float3 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log10
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
extern float4 __attribute__((const, overloadable))native_log10(float4 v);
#endif
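
(Illustrative sketch, not part of this change: the new accuracy note above matters when inputs can be near zero, where the native_* logarithms may return values far from the precise result. One hedged workaround is to clamp the argument away from zero first; the epsilon below is arbitrary.)

    // Keep the argument out of the near-zero region where native_log
    // is documented as inaccurate.
    static float safe_native_log(float v) {
        return native_log(fmax(v, 1.0e-6f));
    }
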
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the natural logarithm of (v + 1.0f)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_log1p(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the natural logarithm of (v + 1.0f)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_log1p(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the natural logarithm of (v + 1.0f)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_log1p(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the natural logarithm of (v + 1.0f)
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_log1p(float4);
+#endif
+
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log2
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7613,6 +8378,7 @@ extern float __attribute__((const, overloadable))native_log2(float v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log2
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7622,6 +8388,7 @@ extern float2 __attribute__((const, overloadable))native_log2(float2 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log2
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7631,17 +8398,55 @@ extern float3 __attribute__((const, overloadable))native_log2(float3 v);
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate log2
+ * It is not accurate for values very close to zero.
*
* Supported by API versions 18 and newer.
*/
extern float4 __attribute__((const, overloadable))native_log2(float4 v);
#endif
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Normalize a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_normalize(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Normalize a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_normalize(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Normalize a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_normalize(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Normalize a vector.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_normalize(float4 v);
+#endif
+
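
(Illustrative sketch, not part of this change: native_length, native_distance, and native_normalize are the approximate counterparts of the precise vector helpers. A small helper combining them, assuming API 21; the function name is arbitrary.)

    // Step a point one unit along an approximate direction and report
    // the approximate distance actually covered.
    static float step_and_measure(float3 p, float3 dir) {
        float3 unit = native_normalize(dir);
        return native_distance(p, p + unit);
    }
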
#if (defined(RS_VERSION) && (RS_VERSION >= 18))
/*
* Fast approximate v ^ y
* v must be between 0.f and 256.f
* y must be between -15.f and 15.f
+ * It is not accurate for values of v very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7653,6 +8458,7 @@ extern float __attribute__((const, overloadable))native_powr(float v, float y);
* Fast approximate v ^ y
* v must be between 0.f and 256.f
* y must be between -15.f and 15.f
+ * It is not accurate for values of v very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7664,6 +8470,7 @@ extern float2 __attribute__((const, overloadable))native_powr(float2 v, float2 y
* Fast approximate v ^ y
* v must be between 0.f and 256.f
* y must be between -15.f and 15.f
+ * It is not accurate for values of v very close to zero.
*
* Supported by API versions 18 and newer.
*/
@@ -7675,12 +8482,425 @@ extern float3 __attribute__((const, overloadable))native_powr(float3 v, float3 y
* Fast approximate v ^ y
* v must be between 0.f and 256.f
* y must be between -15.f and 15.f
+ * It is not accurate for values of v very close to zero.
*
* Supported by API versions 18 and newer.
*/
extern float4 __attribute__((const, overloadable))native_powr(float4 v, float4 y);
#endif
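
(Illustrative sketch, not part of this change: native_powr's documented domain, v in [0.f, 256.f] and y in [-15.f, 15.f] with v not very close to zero, is stated in the comments above but not enforced. A defensive wrapper could clamp into that range first; the lower bound on v is an arbitrary illustration.)

    // Keep v and y inside the range native_powr is specified for.
    static float safe_native_powr(float v, float y) {
        v = clamp(v, 1.0e-4f, 256.f);
        y = clamp(y, -15.f, 15.f);
        return native_powr(v, y);
    }
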
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate reciprocal of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_recip(float v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate reciprocal of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_recip(float2 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate reciprocal of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_recip(float3 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate reciprocal of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_recip(float4 v);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the Nth root of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_rootn(float v, int n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the Nth root of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_rootn(float2 v, int2 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the Nth root of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_rootn(float3 v, int3 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Compute the Nth root of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_rootn(float4 v, int4 n);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (1 / sqrt(value)).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_rsqrt(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (1 / sqrt(value)).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_rsqrt(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (1 / sqrt(value)).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_rsqrt(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return (1 / sqrt(value)).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_rsqrt(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_sin(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_sin(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_sin(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_sin(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((overloadable))native_sincos(float v, float* cosptr);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((overloadable))native_sincos(float2 v, float2* cosptr);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((overloadable))native_sincos(float3 v, float3* cosptr);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the sine and cosine of a value.
+ *
+ * @return sine
+ * @param v The incoming value in radians
+ * @param *cosptr cosptr[0] will be set to the cosine value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((overloadable))native_sincos(float4 v, float4* cosptr);
+#endif
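
A minimal usage sketch for native_sincos, assuming an API 21+ target; the package name in the preamble is illustrative only. The sine is returned and the cosine is written through the pointer.

    #pragma version(1)
    #pragma rs java_package_name(com.example.rsdocs)  // hypothetical package name

    // Compute an approximate sine and cosine of an angle with a single call.
    static float2 approx_sin_cos(float angle_radians) {
        float c;
        float s = native_sincos(angle_radians, &c);  // returns sine; cosine stored in c
        float2 result = {s, c};
        return result;
    }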
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_sinh(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_sinh(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_sinh(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic sine of a value specified in radians.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_sinh(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return sin(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_sinpi(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return sin(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_sinpi(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return sin(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_sinpi(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return sin(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_sinpi(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate sqrt(v).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_sqrt(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate sqrt(v).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_sqrt(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate sqrt(v).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_sqrt(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the approximate sqrt(v).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_sqrt(float4);
+#endif
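
A sketch of the intended trade-off of the native_* variants (approximate but typically faster), reusing the script preamble from the earlier sketch; fast_normalize3 is an illustrative helper, not part of this header.

    // Approximate normalization using native_rsqrt; fewer guaranteed bits of
    // precision than rsqrt()/sqrt(), but potentially cheaper.
    static float3 fast_normalize3(float3 v) {
        float inv_len = native_rsqrt(v.x * v.x + v.y * v.y + v.z * v.z);
        return v * inv_len;
    }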
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_tan(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_tan(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_tan(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_tan(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_tanh(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_tanh(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_tanh(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return the hyperbolic tangent of a value.
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_tanh(float4);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return tan(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float __attribute__((const, overloadable))native_tanpi(float);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return tan(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float2 __attribute__((const, overloadable))native_tanpi(float2);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return tan(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float3 __attribute__((const, overloadable))native_tanpi(float3);
+#endif
+
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+/*
+ * Return tan(v * PI).
+ *
+ * Supported by API versions 21 and newer.
+ */
+extern float4 __attribute__((const, overloadable))native_tanpi(float4);
+#endif
+
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
* Return the next floating point number from x towards y.
@@ -7939,7 +9159,7 @@ extern float4 __attribute__((const, overloadable))remainder(float4 x, float4 y);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the quotient and the remainder of b/c
+ * Return the quotient and the remainder of b/c. Only the sign and lowest three bits of the quotient are guaranteed to be accurate.
*
* Supported by API versions 9 and newer.
*/
@@ -7948,7 +9168,7 @@ extern float __attribute__((overloadable))remquo(float b, float c, int* d);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the quotient and the remainder of b/c
+ * Return the quotient and the remainder of b/c. Only the sign and lowest three bits of the quotient are guaranteed to be accurate.
*
* Supported by API versions 9 and newer.
*/
@@ -7957,7 +9177,7 @@ extern float2 __attribute__((overloadable))remquo(float2 b, float2 c, int2* d);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the quotient and the remainder of b/c
+ * Return the quotient and the remainder of b/c. Only the sign and lowest three bits of the quotient are guaranteed to be accurate.
*
* Supported by API versions 9 and newer.
*/
@@ -7966,7 +9186,7 @@ extern float3 __attribute__((overloadable))remquo(float3 b, float3 c, int3* d);
#if (defined(RS_VERSION) && (RS_VERSION >= 9))
/*
- * Return the quotient and the remainder of b/c
+ * Return the quotient and the remainder of b/c. Only the sign and lowest three bits of the quotient are guaranteed to be accurate.
*
* Supported by API versions 9 and newer.
*/
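
A sketch of remquo used for argument reduction, reusing the earlier script preamble; per the comment above, only the sign and the lowest three bits of the stored quotient are reliable, which is enough to recover a quadrant.

    // Reduce an angle modulo pi/2 and report which quadrant it fell into.
    static float reduce_to_quadrant(float angle, int* quadrant) {
        int q = 0;
        float r = remquo(angle, 1.570796327f /* pi/2 */, &q);
        *quadrant = q & 3;  // only the low bits of the quotient are meaningful
        return r;
    }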
@@ -8449,38 +9669,38 @@ extern float3 __attribute__((const, overloadable))step(float3 edge, float v);
extern float4 __attribute__((const, overloadable))step(float4 edge, float v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* if (v < edge)
* return 0.f;
* else
* return 1.f;
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float2 __attribute__((const, overloadable))step(float edge, float2 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* if (v < edge)
* return 0.f;
* else
* return 1.f;
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float3 __attribute__((const, overloadable))step(float edge, float3 v);
#endif
-#if (defined(RS_VERSION) && (RS_VERSION >= 20))
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
/*
* if (v < edge)
* return 0.f;
* else
* return 1.f;
*
- * Supported by API versions 20 and newer.
+ * Supported by API versions 21 and newer.
*/
extern float4 __attribute__((const, overloadable))step(float edge, float4 v);
#endif
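
A sketch of the scalar-edge step overloads whose version guard is raised to 21 above, reusing the earlier script preamble.

    // Per-channel mask: 1.0f where the channel reaches the threshold, else 0.0f.
    static float4 threshold_mask(float4 color, float threshold) {
        return step(threshold, color);  // scalar edge, vector value
    }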
diff --git a/renderscript/include/rs_matrix.rsh b/renderscript/include/rs_matrix.rsh
index ebff7f4..34b9532 100644
--- a/renderscript/include/rs_matrix.rsh
+++ b/renderscript/include/rs_matrix.rsh
@@ -15,8 +15,35 @@
*/
/** @file rs_matrix.rsh
- * \brief Matrix routines
+ * \brief Matrix functions.
*
+ * These functions let you manipulate square matrices of size 2x2, 3x3, and 4x4.
+ * They are particularly useful for graphical transformations and are
+ * compatible with OpenGL.
+ *
+ * A few general notes:
+ *
+ * \li We use a zero-based index for rows and columns. E.g. the last element of
+ * a \ref rs_matrix4x4 is found at (3, 3).
+ *
+ * \li RenderScript uses column-based vectors. Transforming a vector is done by
+ * postmultiplying the vector, e.g. <em>(matrix * vector)</em>, as provided by
+ * \ref rsMatrixMultiply.
+ *
+ * \li To create a transformation matrix that performs two transformations at
+ * once, multiply the two source matrices, with the first transformation as the
+ * right argument. E.g. to create a transformation matrix that applies the
+ * transformation \e s1 followed by \e s2, call
+ * <c>rsMatrixLoadMultiply(&combined, &s2, &s1)</c>.
+ * This derives from <em>s2 * (s1 * v)</em>, which is <em>(s2 * s1) * v</em> (see the sketch following this comment block).
+ *
+ * \li We have two styles of functions to create transformation matrices:
+ * rsMatrixLoad<em>Transformation</em> and rsMatrix<em>Transformation</em>. The
+ * former style simply stores the transformation matrix in the first argument.
+ * The latter modifies a pre-existing transformation matrix so that the new
+ * transformation happens first. E.g. if you call \ref rsMatrixTranslate
+ * on a matrix that already does a scaling, the resulting matrix when applied
+ * to a vector will first do the translation then the scaling.
*
*/
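
A sketch of the composition rule described in the notes above, reusing the earlier script preamble; s1 and s2 are hypothetical transformations, with s1 meant to apply first.

    // combined = s2 * s1, so combined * v == s2 * (s1 * v).
    static float4 apply_two_transforms(const rs_matrix4x4* s1,
                                       const rs_matrix4x4* s2, float4 v) {
        rs_matrix4x4 combined;
        rsMatrixLoadMultiply(&combined, s2, s1);  // first transformation goes on the right
        return rsMatrixMultiply(&combined, v);
    }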
@@ -24,54 +51,60 @@
#define __RS_MATRIX_RSH__
/**
- * Set one element of a matrix.
+ * Set an element of a matrix.
+ *
+ * @param m The matrix that will be modified.
+ * @param col The zero-based column of the element to be set.
+ * @param row The zero-based row of the element to be set.
+ * @param v The value to set.
*
- * @param m The matrix to be set
- * @param row
- * @param col
- * @param v
+ * \warning The order of the column and row parameters may be
+ * unexpected.
*
* @return void
*/
_RS_RUNTIME void __attribute__((overloadable))
-rsMatrixSet(rs_matrix4x4 *m, uint32_t row, uint32_t col, float v);
+rsMatrixSet(rs_matrix4x4 *m, uint32_t col, uint32_t row, float v);
/**
* \overload
*/
_RS_RUNTIME void __attribute__((overloadable))
-rsMatrixSet(rs_matrix3x3 *m, uint32_t row, uint32_t col, float v);
+rsMatrixSet(rs_matrix3x3 *m, uint32_t col, uint32_t row, float v);
/**
* \overload
*/
_RS_RUNTIME void __attribute__((overloadable))
-rsMatrixSet(rs_matrix2x2 *m, uint32_t row, uint32_t col, float v);
+rsMatrixSet(rs_matrix2x2 *m, uint32_t col, uint32_t row, float v);
/**
- * Get one element of a matrix.
+ * Returns one element of a matrix.
+ *
+ * @param m The matrix to extract the element from.
+ * @param col The zero-based column of the element to be extracted.
+ * @param row The zero-based row of the element to be extracted.
*
- * @param m The matrix to read from
- * @param row
- * @param col
+ * \warning The order of the column and row parameters may be
+ * unexpected.
*
* @return float
*/
_RS_RUNTIME float __attribute__((overloadable))
-rsMatrixGet(const rs_matrix4x4 *m, uint32_t row, uint32_t col);
+rsMatrixGet(const rs_matrix4x4 *m, uint32_t col, uint32_t row);
/**
* \overload
*/
_RS_RUNTIME float __attribute__((overloadable))
-rsMatrixGet(const rs_matrix3x3 *m, uint32_t row, uint32_t col);
+rsMatrixGet(const rs_matrix3x3 *m, uint32_t col, uint32_t row);
/**
* \overload
*/
_RS_RUNTIME float __attribute__((overloadable))
-rsMatrixGet(const rs_matrix2x2 *m, uint32_t row, uint32_t col);
+rsMatrixGet(const rs_matrix2x2 *m, uint32_t col, uint32_t row);
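
A sketch of the (column, row) argument order that the warnings above call out, reusing the earlier script preamble.

    // Copy one element between matrices; both calls take the column index first.
    static void copy_element(rs_matrix4x4* dst, const rs_matrix4x4* src,
                             uint32_t col, uint32_t row) {
        float v = rsMatrixGet(src, col, row);
        rsMatrixSet(dst, col, row, v);
    }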
/**
* Set the elements of a matrix to the identity matrix.
*
- * @param m
+ * @param m The matrix to set.
*/
extern void __attribute__((overloadable)) rsMatrixLoadIdentity(rs_matrix4x4 *m);
/**
@@ -86,7 +119,13 @@ extern void __attribute__((overloadable)) rsMatrixLoadIdentity(rs_matrix2x2 *m);
/**
* Set the elements of a matrix from an array of floats.
*
- * @param m
+ * The array of floats should be in row-major order, i.e. the element at
+ * <em>row 0, column 0</em> should be first, followed by the element at
+ * <em>row 0, column 1</em>, etc.
+ *
+ * @param m The matrix to set.
+ * @param v The array of values to set the matrix to. These arrays should be
+ * 4, 9, or 16 floats long, depending on the matrix size.
*/
extern void __attribute__((overloadable)) rsMatrixLoad(rs_matrix4x4 *m, const float *v);
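
A sketch of loading a 4x4 matrix from a 16-float array, reusing the earlier script preamble and taking the row-major ordering documented above at face value (the identity used here is order-independent anyway).

    // Load the identity from a flat array of 16 values.
    static void load_identity_from_array(rs_matrix4x4* m) {
        const float values[16] = {
            1.f, 0.f, 0.f, 0.f,
            0.f, 1.f, 0.f, 0.f,
            0.f, 0.f, 1.f, 0.f,
            0.f, 0.f, 0.f, 1.f
        };
        rsMatrixLoad(m, values);
    }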
/**
@@ -98,18 +137,29 @@ extern void __attribute__((overloadable)) rsMatrixLoad(rs_matrix3x3 *m, const fl
*/
extern void __attribute__((overloadable)) rsMatrixLoad(rs_matrix2x2 *m, const float *v);
/**
- * \overload
+ * Set the elements of a matrix from another matrix.
+ *
+ * If the source matrix is smaller than the destination, the rest of the
+ * destination is filled with elements of the identity matrix. E.g.
+ * loading a rs_matrix2x2 into a rs_matrix4x4 will give:
+ *
+ * \htmlonly<table>
+ * <tr><td>m00</td><td>m01</td><td>0.0</td><td>0.0</td></tr>
+ * <tr><td>m10</td><td>m11</td><td>0.0</td><td>0.0</td></tr>
+ * <tr><td>0.0</td><td>0.0</td><td>1.0</td><td>0.0</td></tr>
+ * <tr><td>0.0</td><td>0.0</td><td>0.0</td><td>1.0</td></tr>
+ * </table>\endhtmlonly
+ *
+ * @param m The matrix to set.
+ * @param v The source matrix.
*/
extern void __attribute__((overloadable)) rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix4x4 *v);
/**
* \overload
*/
extern void __attribute__((overloadable)) rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix3x3 *v);
-
/**
- * Set the elements of a matrix from another matrix.
- *
- * @param m
+ * \overload
*/
extern void __attribute__((overloadable)) rsMatrixLoad(rs_matrix4x4 *m, const rs_matrix2x2 *v);
/**
@@ -124,11 +174,19 @@ extern void __attribute__((overloadable)) rsMatrixLoad(rs_matrix2x2 *m, const rs
/**
* Load a rotation matrix.
*
- * @param m
- * @param rot
- * @param x
- * @param y
- * @param z
+ * This function creates a rotation matrix. The axis of rotation is the
+ * <em>(x, y, z)</em> vector.
+ *
+ * To rotate a vector, multiply the vector by the created matrix
+ * using \ref rsMatrixMultiply.
+ *
+ * See http://en.wikipedia.org/wiki/Rotation_matrix .
+ *
+ * @param m The matrix to set.
+ * @param rot How much rotation to do, in degrees.
+ * @param x The x component of the vector that is the axis of rotation.
+ * @param y The y component of the vector that is the axis of rotation.
+ * @param z The z component of the vector that is the axis of rotation.
*/
extern void __attribute__((overloadable))
rsMatrixLoadRotate(rs_matrix4x4 *m, float rot, float x, float y, float z);
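
A sketch of building and applying a rotation matrix, reusing the earlier script preamble; the angle is in degrees and the axis chosen here is the z axis.

    // Rotate a point 45 degrees around the z axis.
    static float4 rotate_about_z(float4 point) {
        rs_matrix4x4 rot;
        rsMatrixLoadRotate(&rot, 45.f, 0.f, 0.f, 1.f);
        return rsMatrixMultiply(&rot, point);
    }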
@@ -136,10 +194,16 @@ rsMatrixLoadRotate(rs_matrix4x4 *m, float rot, float x, float y, float z);
/**
* Load a scale matrix.
*
- * @param m
- * @param x
- * @param y
- * @param z
+ * This function creates a scaling matrix, where each component of a
+ * vector is multiplied by a number. This number can be negative.
+ *
+ * To scale a vector, multiply the vector by the created matrix
+ * using \ref rsMatrixMultiply.
+ *
+ * @param m The matrix to set.
+ * @param x The multiple to scale the x components by.
+ * @param y The multiple to scale the y components by.
+ * @param z The multiple to scale the z components by.
*/
extern void __attribute__((overloadable))
rsMatrixLoadScale(rs_matrix4x4 *m, float x, float y, float z);
@@ -147,20 +211,38 @@ rsMatrixLoadScale(rs_matrix4x4 *m, float x, float y, float z);
/**
* Load a translation matrix.
*
- * @param m
- * @param x
- * @param y
- * @param z
+ * This function creates a translation matrix, where a
+ * number is added to each element of a vector.
+ *
+ * To translate a vector, multiply the vector by the created matrix
+ * using \ref rsMatrixMultiply.
+ *
+ * @param m The matrix to set.
+ * @param x The number to add to each x component.
+ * @param y The number to add to each y component.
+ * @param z The number to add to each z component.
*/
extern void __attribute__((overloadable))
rsMatrixLoadTranslate(rs_matrix4x4 *m, float x, float y, float z);
/**
- * Multiply two matrix (lhs, rhs) and place the result in m.
+ * Multiply two matrices.
+ *
+ * Sets \e m to the matrix product of <em>lhs * rhs</em>.
*
- * @param m
- * @param lhs
- * @param rhs
+ * To combine two 4x4 transformation matrices, multiply the second transformation matrix
+ * by the first transformation matrix. E.g. to create a transformation matrix that applies
+ * the transformation \e s1 followed by \e s2, call
+ * <c>rsMatrixLoadMultiply(&combined, &s2, &s1)</c>.
+ *
+ * \warning Prior to version 21, storing the result back into the right matrix is not supported and
+ * will result in undefined behavior. Use rsMatrixMultiply instead. E.g. instead of doing
+ * rsMatrixLoadMultiply(&m2r, &m2r, &m2l), use rsMatrixMultiply(&m2r, &m2l).
+ * rsMatrixLoadMultiply(&m2l, &m2r, &m2l) works as expected.
+ *
+ * @param m The matrix to set.
+ * @param lhs The left matrix of the product.
+ * @param rhs The right matrix of the product.
*/
extern void __attribute__((overloadable))
rsMatrixLoadMultiply(rs_matrix4x4 *m, const rs_matrix4x4 *lhs, const rs_matrix4x4 *rhs);
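
A sketch of the workaround recommended in the warning above for pre-21 targets, reusing the earlier script preamble.

    // Compute m = m * other without an aliased rsMatrixLoadMultiply call.
    static void multiply_in_place(rs_matrix4x4* m, const rs_matrix4x4* other) {
        rsMatrixMultiply(m, other);  // equivalent result, safe before API 21
    }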
@@ -176,10 +258,16 @@ extern void __attribute__((overloadable))
rsMatrixLoadMultiply(rs_matrix2x2 *m, const rs_matrix2x2 *lhs, const rs_matrix2x2 *rhs);
/**
- * Multiply the matrix m by rhs and place the result back into m.
+ * Multiply a matrix into another one.
+ *
+ * Sets \e m to the matrix product <em>m * rhs</em>.
*
- * @param m (lhs)
- * @param rhs
+ * When combining two 4x4 transformation matrices using this function, the resulting
+ * matrix will correspond to performing the \e rhs transformation first followed by
+ * the original \e m transformation.
+ *
+ * @param m The left matrix of the product and the matrix to be set.
+ * @param rhs The right matrix of the product.
*/
extern void __attribute__((overloadable))
rsMatrixMultiply(rs_matrix4x4 *m, const rs_matrix4x4 *rhs);
@@ -195,43 +283,73 @@ extern void __attribute__((overloadable))
rsMatrixMultiply(rs_matrix2x2 *m, const rs_matrix2x2 *rhs);
/**
- * Multiple matrix m with a rotation matrix
+ * Multiply the matrix \e m with a rotation matrix.
+ *
+ * This function modifies a transformation matrix to first do a rotation.
+ * The axis of rotation is the <em>(x, y, z)</em> vector.
*
- * @param m
- * @param rot
- * @param x
- * @param y
- * @param z
+ * To apply this combined transformation to a vector, multiply
+ * the vector by the created matrix using \ref rsMatrixMultiply.
+ *
+ * @param m The matrix to modify.
+ * @param rot How much rotation to do, in degrees.
+ * @param x The x component of the vector that is the axis of rotation.
+ * @param y The y component of the vector that is the axis of rotation.
+ * @param z The z component of the vector that is the axis of rotation.
*/
extern void __attribute__((overloadable))
rsMatrixRotate(rs_matrix4x4 *m, float rot, float x, float y, float z);
/**
- * Multiple matrix m with a scale matrix
+ * Multiply the matrix \e m with a scaling matrix.
+ *
+ * This function modifies a transformation matrix to first do a scaling.
+ * When scaling, each component of a vector is multiplied by a number.
+ * This number can be negative.
*
- * @param m
- * @param x
- * @param y
- * @param z
+ * To apply this combined transformation to a vector, multiply
+ * the vector by the created matrix using \ref rsMatrixMultiply.
+ *
+ * @param m The matrix to modify.
+ * @param x The multiple to scale the x components by.
+ * @param y The multiple to scale the y components by.
+ * @param z The multiple to scale the z components by.
*/
extern void __attribute__((overloadable))
rsMatrixScale(rs_matrix4x4 *m, float x, float y, float z);
/**
- * Multiple matrix m with a translation matrix
+ * Multiply the matrix \e m with a translation matrix.
+ *
+ * This function modifies a transformation matrix to first
+ * do a translation. When translating, a number is added
+ * to each component of a vector.
+ *
+ * To apply this combined transformation to a vector, multiply
+ * the vector by the created matrix using \ref rsMatrixMultiply.
*
- * @param m
- * @param x
- * @param y
- * @param z
+ * @param m The matrix to modify.
+ * @param x The number to add to each x component.
+ * @param y The number to add to each y component.
+ * @param z The number to add to each z component.
*/
extern void __attribute__((overloadable))
rsMatrixTranslate(rs_matrix4x4 *m, float x, float y, float z);
/**
- * Load an Ortho projection matrix constructed from the 6 planes
+ * Load an orthographic projection matrix.
*
- * @param m
+ * Constructs an orthographic projection matrix, transforming the box
+ * identified by the six clipping planes <em>left, right, bottom, top,
+ * near, far</em> into a unit cube with a corner at
+ * <em>(-1, -1, -1)</em> and the opposite at <em>(1, 1, 1)</em>.
+ *
+ * To apply this projection to a vector, multiply the vector by the
+ * created matrix using \ref rsMatrixMultiply.
+ *
+ * See https://en.wikipedia.org/wiki/Orthographic_projection .
+ *
+ * @param m The matrix to set.
* @param left
* @param right
* @param bottom
@@ -243,9 +361,16 @@ extern void __attribute__((overloadable))
rsMatrixLoadOrtho(rs_matrix4x4 *m, float left, float right, float bottom, float top, float near, float far);
/**
- * Load an Frustum projection matrix constructed from the 6 planes
+ * Load a frustum projection matrix.
+ *
+ * Constructs a frustum projection matrix, transforming the box
+ * identified by the six clipping planes <em>left, right, bottom, top,
+ * near, far</em>.
*
- * @param m
+ * To apply this projection to a vector, multiply the vector by the
+ * created matrix using \ref rsMatrixMultiply.
+ *
+ * @param m The matrix to set.
* @param left
* @param right
* @param bottom
@@ -257,21 +382,36 @@ extern void __attribute__((overloadable))
rsMatrixLoadFrustum(rs_matrix4x4 *m, float left, float right, float bottom, float top, float near, float far);
/**
- * Load an perspective projection matrix constructed from the 6 planes
+ * Load a perspective projection matrix.
+ *
+ * Constructs a perspective projection matrix, assuming a symmetrical field of view.
+ *
+ * To apply this projection to a vector, multiply the vector by the
+ * created matrix using \ref rsMatrixMultiply.
*
- * @param m
+ * @param m The matrix to set.
* @param fovy Field of view, in degrees along the Y axis.
* @param aspect Ratio of x / y.
- * @param near
- * @param far
+ * @param near The near clipping plane.
+ * @param far The far clipping plane.
*/
extern void __attribute__((overloadable))
rsMatrixLoadPerspective(rs_matrix4x4* m, float fovy, float aspect, float near, float far);
#if !defined(RS_VERSION) || (RS_VERSION < 14)
/**
- * Multiply a vector by a matrix and return the result vector.
- * API version 10-13
+ * Multiply a vector by a matrix.
+ *
+ * Returns the post-multiplication of the vector by the matrix, i.e. <em>m * in</em>.
+ *
+ * When multiplying a \e float3 by a \e rs_matrix4x4, the vector is expanded with (1).
+ *
+ * When multiplying a \e float2 by a \e rs_matrix4x4, the vector is expanded with (0, 1).
+ *
+ * When multiplying a \e float2 by a \e rs_matrix3x3, the vector is expanded with (0).
+ *
+ * This function is available in API versions 10-13. Starting with API 14,
+ * the function takes a const matrix as the first argument.
*/
_RS_RUNTIME float4 __attribute__((overloadable))
rsMatrixMultiply(rs_matrix4x4 *m, float4 in);
@@ -307,8 +447,17 @@ _RS_RUNTIME float2 __attribute__((overloadable))
rsMatrixMultiply(rs_matrix2x2 *m, float2 in);
#else
/**
- * Multiply a vector by a matrix and return the result vector.
- * API version 14+
+ * Multiply a vector by a matrix.
+ *
+ * Returns the post-multiplication of the vector by the matrix, i.e. <em>m * in</em>.
+ *
+ * When multiplying a \e float3 by a \e rs_matrix4x4, the vector is expanded with (1).
+ *
+ * When multiplying a \e float2 by a \e rs_matrix4x4, the vector is expanded with (0, 1).
+ *
+ * When multiplying a \e float2 by a \e rs_matrix3x3, the vector is expanded with (0).
+ *
+ * This function is available starting with API version 14.
*/
_RS_RUNTIME float4 __attribute__((overloadable))
rsMatrixMultiply(const rs_matrix4x4 *m, float4 in);
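
A sketch of the float3 expansion rule described above, reusing the earlier script preamble; the division by w is a common follow-up step for projective transforms and is not performed by rsMatrixMultiply itself.

    // Transform a 3-component point; the input is expanded to (x, y, z, 1)
    // before the 4x4 multiply, and the result comes back as a float4.
    static float3 transform_point(const rs_matrix4x4* m, float3 p) {
        float4 h = rsMatrixMultiply(m, p);
        return h.xyz / h.w;  // divide out the homogeneous coordinate
    }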
@@ -346,23 +495,28 @@ rsMatrixMultiply(const rs_matrix2x2 *m, float2 in);
/**
- * Returns true if the matrix was successfully inversed
+ * Inverts a matrix in place.
+ *
+ * Returns true if the matrix was successfully inverted.
*
- * @param m
+ * @param m The matrix to invert.
*/
extern bool __attribute__((overloadable)) rsMatrixInverse(rs_matrix4x4 *m);
/**
- * Returns true if the matrix was successfully inversed and transposed.
+ * Inverts and transposes a matrix in place.
+ *
+ * The matrix is first inverted and then transposed.
+ * Returns true if the matrix was successfully inverted.
*
- * @param m
+ * @param m The matrix to modify.
*/
extern bool __attribute__((overloadable)) rsMatrixInverseTranspose(rs_matrix4x4 *m);
/**
- * Transpose the matrix m.
+ * Transpose the matrix m in place.
*
- * @param m
+ * @param m The matrix to transpose.
*/
extern void __attribute__((overloadable)) rsMatrixTranspose(rs_matrix4x4 *m);
/**
diff --git a/renderscript/include/rs_time.rsh b/renderscript/include/rs_time.rsh
index 1e6ab99..abcb88b 100644
--- a/renderscript/include/rs_time.rsh
+++ b/renderscript/include/rs_time.rsh
@@ -28,7 +28,11 @@
* Calendar time interpreted as seconds elapsed since the Epoch (00:00:00 on
* January 1, 1970, Coordinated Universal Time (UTC)).
*/
+#ifndef __LP64__
typedef int rs_time_t;
+#else
+typedef long rs_time_t;
+#endif
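
A sketch of how rs_time_t is typically consumed through rsTime (declared later in this header), reusing the earlier script preamble; the conditional above only widens the typedef to 64 bits on LP64 targets.

    // Whole seconds elapsed between two wall-clock samples.
    static rs_time_t seconds_between(void) {
        rs_time_t start = rsTime((rs_time_t*)0);  // null pointer: just return the current time
        rs_time_t end = rsTime((rs_time_t*)0);
        return end - start;
    }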
/**
* Data structure for broken-down time components.
diff --git a/renderscript/include/rs_types.rsh b/renderscript/include/rs_types.rsh
index 33cd7da..f1fc60b 100644
--- a/renderscript/include/rs_types.rsh
+++ b/renderscript/include/rs_types.rsh
@@ -72,7 +72,11 @@ typedef int int32_t;
/**
* 64 bit integer type
*/
-typedef long long int64_t;
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+ typedef long int64_t;
+#else
+ typedef long long int64_t;
+#endif
/**
* 8 bit unsigned integer type
*/
@@ -88,7 +92,11 @@ typedef unsigned int uint32_t;
/**
* 64 bit unsigned integer type
*/
-typedef unsigned long long uint64_t;
+#if (defined(RS_VERSION) && (RS_VERSION >= 21))
+ typedef unsigned long uint64_t;
+#else
+ typedef unsigned long long uint64_t;
+#endif
/**
* 8 bit unsigned integer type
*/
diff --git a/renderscript/lib/bc/armeabi-v7a/libclcore.bc b/renderscript/lib/bc/armeabi-v7a/libclcore.bc
index c918fc8..34cbe63 100644
--- a/renderscript/lib/bc/armeabi-v7a/libclcore.bc
+++ b/renderscript/lib/bc/armeabi-v7a/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/bc/mips/libclcore.bc b/renderscript/lib/bc/mips/libclcore.bc
index 7f1e217..34cbe63 100644
--- a/renderscript/lib/bc/mips/libclcore.bc
+++ b/renderscript/lib/bc/mips/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/bc/x86/libclcore.bc b/renderscript/lib/bc/x86/libclcore.bc
index 808163a..76ebb47 100644
--- a/renderscript/lib/bc/x86/libclcore.bc
+++ b/renderscript/lib/bc/x86/libclcore.bc
Binary files differ
diff --git a/renderscript/lib/intermediates/armeabi-v7a/libc.so b/renderscript/lib/intermediates/armeabi-v7a/libc.so
index f844e7f..04aee46 100755..100644
--- a/renderscript/lib/intermediates/armeabi-v7a/libc.so
+++ b/renderscript/lib/intermediates/armeabi-v7a/libc.so
Binary files differ
diff --git a/renderscript/lib/intermediates/armeabi-v7a/libm.so b/renderscript/lib/intermediates/armeabi-v7a/libm.so
index 70e8ded..01d9d4f 100755..100644
--- a/renderscript/lib/intermediates/armeabi-v7a/libm.so
+++ b/renderscript/lib/intermediates/armeabi-v7a/libm.so
Binary files differ
diff --git a/renderscript/lib/intermediates/mips/libc.so b/renderscript/lib/intermediates/mips/libc.so
index 9e51632..e3613f6 100755..100644
--- a/renderscript/lib/intermediates/mips/libc.so
+++ b/renderscript/lib/intermediates/mips/libc.so
Binary files differ
diff --git a/renderscript/lib/intermediates/mips/libm.so b/renderscript/lib/intermediates/mips/libm.so
index 7bc6a67..2d989a2 100755..100644
--- a/renderscript/lib/intermediates/mips/libm.so
+++ b/renderscript/lib/intermediates/mips/libm.so
Binary files differ
diff --git a/renderscript/lib/intermediates/x86/libc.so b/renderscript/lib/intermediates/x86/libc.so
index b3c117c..fb51f4f 100755..100644
--- a/renderscript/lib/intermediates/x86/libc.so
+++ b/renderscript/lib/intermediates/x86/libc.so
Binary files differ
diff --git a/renderscript/lib/intermediates/x86/libm.so b/renderscript/lib/intermediates/x86/libm.so
index 2a4e2df..3b4c732 100755..100644
--- a/renderscript/lib/intermediates/x86/libm.so
+++ b/renderscript/lib/intermediates/x86/libm.so
Binary files differ
diff --git a/renderscript/lib/packaged/armeabi-v7a/libRSSupport.so b/renderscript/lib/packaged/armeabi-v7a/libRSSupport.so
index b414f7c..c12750f 100755..100644
--- a/renderscript/lib/packaged/armeabi-v7a/libRSSupport.so
+++ b/renderscript/lib/packaged/armeabi-v7a/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/packaged/armeabi-v7a/librsjni.so b/renderscript/lib/packaged/armeabi-v7a/librsjni.so
index 97617d3..f376df6 100755..100644
--- a/renderscript/lib/packaged/armeabi-v7a/librsjni.so
+++ b/renderscript/lib/packaged/armeabi-v7a/librsjni.so
Binary files differ
diff --git a/renderscript/lib/packaged/mips/libRSSupport.so b/renderscript/lib/packaged/mips/libRSSupport.so
index 710395a..d195cb7 100755..100644
--- a/renderscript/lib/packaged/mips/libRSSupport.so
+++ b/renderscript/lib/packaged/mips/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/packaged/mips/librsjni.so b/renderscript/lib/packaged/mips/librsjni.so
index 89ce92f..54e0e11 100755..100644
--- a/renderscript/lib/packaged/mips/librsjni.so
+++ b/renderscript/lib/packaged/mips/librsjni.so
Binary files differ
diff --git a/renderscript/lib/packaged/x86/libRSSupport.so b/renderscript/lib/packaged/x86/libRSSupport.so
index 0eabdce..a50fbd9 100755..100644
--- a/renderscript/lib/packaged/x86/libRSSupport.so
+++ b/renderscript/lib/packaged/x86/libRSSupport.so
Binary files differ
diff --git a/renderscript/lib/packaged/x86/librsjni.so b/renderscript/lib/packaged/x86/librsjni.so
index aecc9f5..5a5bb7f 100755..100644
--- a/renderscript/lib/packaged/x86/librsjni.so
+++ b/renderscript/lib/packaged/x86/librsjni.so
Binary files differ
diff --git a/renderscript/lib/renderscript-v8.jar b/renderscript/lib/renderscript-v8.jar
index 3118583..47a085c 100644
--- a/renderscript/lib/renderscript-v8.jar
+++ b/renderscript/lib/renderscript-v8.jar
Binary files differ
diff --git a/source.properties b/source.properties
index 2e677b4..4c5a442 100644
--- a/source.properties
+++ b/source.properties
@@ -1,4 +1,6 @@
Pkg.UserSrc=false
+//Pkg.Revision=21.0.0
+
Archive.Os=MACOSX
Pkg.Revision=22.0.0
Archive.Arch=ANY
diff --git a/zipalign b/zipalign
index dc49921..706ca4c 100755
--- a/zipalign
+++ b/zipalign
Binary files differ