Diffstat (limited to 'fixedpoint')
-rw-r--r--  fixedpoint/fixedpoint.h            30
-rw-r--r--  fixedpoint/fixedpoint_avx.h       168
-rw-r--r--  fixedpoint/fixedpoint_sse.h        52
-rw-r--r--  fixedpoint/fixedpoint_wasmsimd.h  381
4 files changed, 598 insertions(+), 33 deletions(-)
diff --git a/fixedpoint/fixedpoint.h b/fixedpoint/fixedpoint.h
index 58e8050..56e95c0 100644
--- a/fixedpoint/fixedpoint.h
+++ b/fixedpoint/fixedpoint.h
@@ -95,12 +95,13 @@ tIntegerType Add(tIntegerType a, tIntegerType b) {
return a + b;
}
-// Integer subtraction. Not saturating. Overflow is undefined behavior.
+// Integer multiplication. Not saturating. Overflow is undefined behavior.
template <typename tIntegerType>
tIntegerType Mul(tIntegerType a, tIntegerType b) {
return a * b;
}
+// Integer subtraction. Not saturating. Overflow is undefined behavior.
template <typename tIntegerType>
tIntegerType Sub(tIntegerType a, tIntegerType b) {
return a - b;
@@ -268,6 +269,16 @@ inline std::int16_t SaturatingAdd(std::int16_t a, std::int16_t b) {
std::max(static_cast<std::int32_t>(-32768), sum)));
}
+template <>
+inline std::int8_t SaturatingAdd(std::int8_t a, std::int8_t b) {
+ std::int16_t a16 = a;
+ std::int16_t b16 = b;
+ std::int16_t sum = a16 + b16;
+ return static_cast<std::int8_t>(std::min(
+ static_cast<int16_t>(std::numeric_limits<int8_t>::max()),
+ std::max(static_cast<int16_t>(std::numeric_limits<int8_t>::min()), sum)));
+}
+
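As an illustration of the new specialization (a standalone sketch, assuming the gemmlowp source root is on the include path; the function name is ours):

#include <cassert>
#include <cstdint>

#include "fixedpoint/fixedpoint.h"

void CheckInt8SaturatingAdd() {
  assert(gemmlowp::SaturatingAdd(std::int8_t(100), std::int8_t(100)) == 127);    // clamped to int8 max
  assert(gemmlowp::SaturatingAdd(std::int8_t(-100), std::int8_t(-100)) == -128); // clamped to int8 min
  assert(gemmlowp::SaturatingAdd(std::int8_t(3), std::int8_t(4)) == 7);          // in range: plain sum
}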
// Returns a+b, saturating if the integers are 16bit or narrower,
// otherwise just a plain addition.
template <typename IntegerType, bool Is16Bit>
@@ -767,13 +778,14 @@ FixedPoint<tRawType, 0> exp_on_negative_values(
result * kMultiplier, result); \
}
- GEMMLOWP_EXP_BARREL_SHIFTER(-2, 1672461947);
- GEMMLOWP_EXP_BARREL_SHIFTER(-1, 1302514674);
- GEMMLOWP_EXP_BARREL_SHIFTER(+0, 790015084);
- GEMMLOWP_EXP_BARREL_SHIFTER(+1, 290630308);
- GEMMLOWP_EXP_BARREL_SHIFTER(+2, 39332535);
- GEMMLOWP_EXP_BARREL_SHIFTER(+3, 720401);
- GEMMLOWP_EXP_BARREL_SHIFTER(+4, 242);
+ // Constants below are Q0 representations of negative exp fractionals:
+ GEMMLOWP_EXP_BARREL_SHIFTER(-2, 1672461947); // exp(-1/4)
+ GEMMLOWP_EXP_BARREL_SHIFTER(-1, 1302514674); // exp(-1/2)
+ GEMMLOWP_EXP_BARREL_SHIFTER(+0, 790015084); // exp(-1)
+ GEMMLOWP_EXP_BARREL_SHIFTER(+1, 290630308); // exp(-2)
+ GEMMLOWP_EXP_BARREL_SHIFTER(+2, 39332535); // exp(-4)
+ GEMMLOWP_EXP_BARREL_SHIFTER(+3, 720401); // exp(-8)
+ GEMMLOWP_EXP_BARREL_SHIFTER(+4, 242); // exp(-16)
#undef GEMMLOWP_EXP_BARREL_SHIFTER
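These raw values follow from the Q0 encoding: with a 32-bit raw type a Q0 number has 31 fractional bits, so each constant is round(exp(k) * 2^31). A standalone sketch reproducing the first entry (illustrative only):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const double x = std::exp(-0.25);  // ~0.7788007831
  const std::int64_t raw =
      std::llround(x * static_cast<double>(INT64_C(1) << 31));
  std::printf("%lld\n", static_cast<long long>(raw));  // prints 1672461947
  return 0;
}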
@@ -895,6 +907,8 @@ FixedPoint<tRawType, 0> logistic(FixedPoint<tRawType, tIntegerBits> a) {
#include "./fixedpoint_sse.h"
#elif defined(GEMMLOWP_MSA)
#include "./fixedpoint_msa.h"
+#elif defined(GEMMLOWP_WASMSIMD)
+#include "./fixedpoint_wasmsimd.h"
#endif
#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_H_
diff --git a/fixedpoint/fixedpoint_avx.h b/fixedpoint/fixedpoint_avx.h
index 1816386..f3fe732 100644
--- a/fixedpoint/fixedpoint_avx.h
+++ b/fixedpoint/fixedpoint_avx.h
@@ -17,69 +17,139 @@
#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_AVX_H_
#define GEMMLOWP_INTERNAL_FIXEDPOINT_AVX_H_
-#include <smmintrin.h>
+#include <immintrin.h>
#include "fixedpoint.h"
#include "fixedpoint_sse.h"
namespace gemmlowp {
+struct int16x16_m256i {
+ __m256i v;
+};
+
+// Keep int16x16_m256i trivially constructible/destructible and provide
+// easily optimized helper function.
+inline int16x16_m256i to_int16x16_m256i(__m256i w) {
+ int16x16_m256i r;
+ r.v = w;
+ return r;
+}
+
template <>
struct FixedPointRawTypeTraits<__m256i> {
typedef std::int32_t ScalarRawType;
+ // TODO: This can actually support up to 8 lanes, so we should either
+ // change to 8 or create int32x8_m256i struct to handle that case.
static const int kLanes = 4;
};
template <>
+struct FixedPointRawTypeTraits<int16x16_m256i> {
+ typedef std::int16_t ScalarRawType;
+ static const int kLanes = 16;
+};
+
+template <>
inline __m256i BitAnd(__m256i a, __m256i b) {
return _mm256_and_si256(a, b);
}
template <>
+inline int16x16_m256i BitAnd(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_and_si256(a.v, b.v));
+}
+
+template <>
inline __m256i BitOr(__m256i a, __m256i b) {
return _mm256_or_si256(a, b);
}
template <>
+inline int16x16_m256i BitOr(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_or_si256(a.v, b.v));
+}
+
+template <>
inline __m256i BitXor(__m256i a, __m256i b) {
return _mm256_xor_si256(a, b);
}
template <>
+inline int16x16_m256i BitXor(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_xor_si256(a.v, b.v));
+}
+
+template <>
inline __m256i BitNot(__m256i a) {
return _mm256_andnot_si256(a, _mm256_set1_epi32(-1));
}
template <>
+inline int16x16_m256i BitNot(int16x16_m256i a) {
+ return to_int16x16_m256i(_mm256_andnot_si256(a.v, _mm256_set1_epi16(-1)));
+}
+
+template <>
inline __m256i Add(__m256i a, __m256i b) {
return _mm256_add_epi32(a, b);
}
template <>
+inline int16x16_m256i Add(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_add_epi16(a.v, b.v));
+}
+
+template <>
inline __m256i Mul(__m256i a, __m256i b) {
return _mm256_mullo_epi32(a, b);
}
template <>
+inline int16x16_m256i Mul(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_mullo_epi16(a.v, b.v));
+}
+
+template <>
inline __m256i Sub(__m256i a, __m256i b) {
return _mm256_sub_epi32(a, b);
}
template <>
+inline int16x16_m256i Sub(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_sub_epi16(a.v, b.v));
+}
+
+template <>
inline __m256i Neg(__m256i a) {
return _mm256_sign_epi32(a, _mm256_set1_epi32(-1));
}
template <>
+inline int16x16_m256i Neg(int16x16_m256i a) {
+ return to_int16x16_m256i(_mm256_sign_epi16(a.v, _mm256_set1_epi16(-1)));
+}
+
+template <>
inline __m256i ShiftLeft(__m256i a, int offset) {
return _mm256_slli_epi32(a, offset);
}
template <>
+inline int16x16_m256i ShiftLeft(int16x16_m256i a, int offset) {
+ return to_int16x16_m256i(_mm256_slli_epi16(a.v, offset));
+}
+
+template <>
inline __m256i ShiftRight(__m256i a, int offset) {
return _mm256_srai_epi32(a, offset);
}
template <>
+inline int16x16_m256i ShiftRight(int16x16_m256i a, int offset) {
+ return to_int16x16_m256i(_mm256_srai_epi16(a.v, offset));
+}
+
+template <>
inline __m256i SelectUsingMask(__m256i if_mask, __m256i then_val,
__m256i else_val) {
return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(else_val),
@@ -88,45 +158,97 @@ inline __m256i SelectUsingMask(__m256i if_mask, __m256i then_val,
}
template <>
+inline int16x16_m256i SelectUsingMask(int16x16_m256i if_mask,
+ int16x16_m256i then_val,
+ int16x16_m256i else_val) {
+ // Borrowed from Intel's arm_neon_sse.h header.
+ return to_int16x16_m256i(
+ _mm256_or_si256(_mm256_and_si256(if_mask.v, then_val.v),
+ _mm256_andnot_si256(if_mask.v, else_val.v)));
+}
+
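Per lane this is the usual bit-select: masks are all-ones for true lanes and all-zeros otherwise, so the and/andnot/or sequence picks whole lanes. A scalar sketch of the same operation (illustrative only):

#include <cstdint>

inline std::int16_t ScalarSelectUsingMask(std::int16_t if_mask,
                                          std::int16_t then_val,
                                          std::int16_t else_val) {
  // if_mask is either 0xFFFF (select then_val) or 0x0000 (select else_val).
  return static_cast<std::int16_t>((if_mask & then_val) | (~if_mask & else_val));
}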
+template <>
inline __m256i MaskIfEqual(__m256i a, __m256i b) {
return _mm256_cmpeq_epi32(a, b);
}
template <>
+inline int16x16_m256i MaskIfEqual(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_cmpeq_epi16(a.v, b.v));
+}
+
+template <>
inline __m256i MaskIfNotEqual(__m256i a, __m256i b) {
return BitNot(MaskIfEqual(a, b));
}
template <>
+inline int16x16_m256i MaskIfNotEqual(int16x16_m256i a, int16x16_m256i b) {
+ return BitNot(MaskIfEqual(a, b));
+}
+
+template <>
inline __m256i MaskIfZero(__m256i a) {
return MaskIfEqual(a, _mm256_set1_epi32(0));
}
template <>
+inline int16x16_m256i MaskIfZero(int16x16_m256i a) {
+ return MaskIfEqual(a, to_int16x16_m256i(_mm256_set1_epi16(0)));
+}
+
+template <>
inline __m256i MaskIfNonZero(__m256i a) {
return MaskIfNotEqual(a, _mm256_set1_epi32(0));
}
template <>
+inline int16x16_m256i MaskIfNonZero(int16x16_m256i a) {
+ return MaskIfNotEqual(a, to_int16x16_m256i(_mm256_set1_epi16(0)));
+}
+
+template <>
inline __m256i MaskIfGreaterThan(__m256i a, __m256i b) {
return _mm256_cmpgt_epi32(a, b);
}
template <>
+inline int16x16_m256i MaskIfGreaterThan(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_cmpgt_epi16(a.v, b.v));
+}
+
+template <>
inline __m256i MaskIfLessThan(__m256i a, __m256i b) {
return _mm256_cmpgt_epi32(b, a);
}
template <>
+inline int16x16_m256i MaskIfLessThan(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_cmpgt_epi16(b.v, a.v));
+}
+
+template <>
inline __m256i MaskIfGreaterThanOrEqual(__m256i a, __m256i b) {
return BitNot(MaskIfLessThan(a, b));
}
template <>
+inline int16x16_m256i MaskIfGreaterThanOrEqual(int16x16_m256i a,
+ int16x16_m256i b) {
+ return BitNot(MaskIfLessThan(a, b));
+}
+
+template <>
inline __m256i MaskIfLessThanOrEqual(__m256i a, __m256i b) {
return BitNot(MaskIfGreaterThan(a, b));
}
+template <>
+inline int16x16_m256i MaskIfLessThanOrEqual(int16x16_m256i a,
+ int16x16_m256i b) {
+ return BitNot(MaskIfGreaterThan(a, b));
+}
+
/* Assumptions:
- All and Any are used on masks.
- masks are all_ones for true lanes, all_zeroes otherwise.
@@ -139,11 +261,21 @@ inline bool All(__m256i a) {
}
template <>
+inline bool All(int16x16_m256i a) {
+ return _mm256_testc_si256(a.v, a.v);
+}
+
+template <>
inline bool Any(__m256i a) {
return BitNot(_mm256_testz_si256(a, a));
}
template <>
+inline bool Any(int16x16_m256i a) {
+ return BitNot(_mm256_testz_si256(a.v, a.v));
+}
+
+template <>
inline __m256i RoundingHalfSum(__m256i a, __m256i b) {
/* __m256i round_bit_mask, a_over_2, b_over_2, round_bit, sum; */
/* We divide the inputs before the add to avoid the overflow and costly test
@@ -171,6 +303,17 @@ inline __m256i RoundingHalfSum(__m256i a, __m256i b) {
}
template <>
+inline int16x16_m256i RoundingHalfSum(int16x16_m256i a, int16x16_m256i b) {
+ // Borrowed from Intel's arm_neon_sse.h header.
+ __m256i constant_neg_32768 = _mm256_set1_epi16(-32768);
+ __m256i a_unsigned = _mm256_sub_epi16(a.v, constant_neg_32768);
+ __m256i b_unsigned = _mm256_sub_epi16(b.v, constant_neg_32768);
+ __m256i avg_unsigned = _mm256_avg_epu16(a_unsigned, b_unsigned);
+ __m256i avg = _mm256_add_epi16(avg_unsigned, constant_neg_32768);
+ return to_int16x16_m256i(avg);
+}
+
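A scalar model of the trick above (a sketch, not part of the patch): biasing by 32768 maps int16 onto uint16 while preserving order, _mm256_avg_epu16 then computes the exact unsigned rounding average per lane, and removing the bias gives the signed rounding half-sum.

#include <cstdint>

inline std::int16_t ScalarRoundingHalfSum(std::int16_t a, std::int16_t b) {
  const std::uint32_t a_biased = static_cast<std::uint32_t>(a + 32768);  // 0..65535
  const std::uint32_t b_biased = static_cast<std::uint32_t>(b + 32768);
  const std::uint32_t avg = (a_biased + b_biased + 1) >> 1;  // what avg_epu16 computes
  return static_cast<std::int16_t>(static_cast<std::int32_t>(avg) - 32768);
}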
+template <>
inline __m256i SaturatingRoundingDoublingHighMul(__m256i a, __m256i b) {
__m256i min, saturation_mask, a0_a2, a1_a3, b0_b2, b1_b3;
__m256i a0b0_a2b2, a1b1_a3b3, a0b0_a2b2_rounded, a1b1_a3b3_rounded;
@@ -209,10 +352,33 @@ inline __m256i SaturatingRoundingDoublingHighMul(__m256i a, __m256i b) {
}
template <>
+inline int16x16_m256i SaturatingRoundingDoublingHighMul(int16x16_m256i a,
+ int16x16_m256i b) {
+ // Use _mm256_mulhrs_epi16 then saturate with a bit-operation,
+ // borrowed from Intel's arm_neon_sse.h header.
+ __m256i result_unsaturated = _mm256_mulhrs_epi16(a.v, b.v);
+ __m256i saturation_mask =
+ _mm256_cmpeq_epi16(result_unsaturated, _mm256_set1_epi16(0x8000));
+ __m256i result = _mm256_xor_si256(result_unsaturated, saturation_mask);
+ return to_int16x16_m256i(result);
+}
+
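Per lane, _mm256_mulhrs_epi16 computes (a * b + (1 << 14)) >> 15; the only input pair that overflows int16 is a == b == -32768, and the cmpeq/xor pair flips that lane from -32768 to +32767. A scalar sketch of the combined behavior (illustrative only):

#include <cstdint>
#include <limits>

inline std::int16_t ScalarSaturatingRoundingDoublingHighMulInt16(std::int16_t a,
                                                                 std::int16_t b) {
  if (a == std::numeric_limits<std::int16_t>::min() &&
      b == std::numeric_limits<std::int16_t>::min()) {
    return std::numeric_limits<std::int16_t>::max();  // the lone saturating case
  }
  const std::int32_t ab = static_cast<std::int32_t>(a) * b;
  return static_cast<std::int16_t>((ab + (1 << 14)) >> 15);
}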
+template <>
inline __m256i Dup<__m256i>(std::int32_t x) {
return _mm256_set1_epi32(x);
}
+template <>
+inline int16x16_m256i Dup<int16x16_m256i>(std::int16_t x) {
+ return to_int16x16_m256i(_mm256_set1_epi16(x));
+}
+
+// So far this is only needed for int16.
+template <>
+inline int16x16_m256i SaturatingAdd(int16x16_m256i a, int16x16_m256i b) {
+ return to_int16x16_m256i(_mm256_adds_epi16(a.v, b.v));
+}
+
} // end namespace gemmlowp
#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_AVX_H_
diff --git a/fixedpoint/fixedpoint_sse.h b/fixedpoint/fixedpoint_sse.h
index a1fae32..fbaa26a 100644
--- a/fixedpoint/fixedpoint_sse.h
+++ b/fixedpoint/fixedpoint_sse.h
@@ -32,13 +32,17 @@ namespace gemmlowp {
// data type, int16x8_m128i, that wraps __m128i while being a separate
// type.
struct int16x8_m128i {
- int16x8_m128i() {}
- explicit int16x8_m128i(__m128i w) : v(w) {}
- ~int16x8_m128i() {}
-
__m128i v;
};
+// Keep int16x8_m128i trivially constructible/destructible and provide
+// easily optimized helper function.
+inline int16x8_m128i to_int16x8_m128i(__m128i w) {
+ int16x8_m128i r;
+ r.v = w;
+ return r;
+}
+
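The point of removing the user-declared constructors and destructor: with none of them declared, the wrapper stays a trivial type, so compilers can pass and return it by value in a register like a bare __m128i. A compile-time check one could add (an assumption about intent, not part of the patch; assumes an SSE build with the gemmlowp root on the include path):

#include <type_traits>

#include "fixedpoint/fixedpoint_sse.h"

static_assert(std::is_trivial<gemmlowp::int16x8_m128i>::value,
              "int16x8_m128i should stay trivially constructible/destructible");
static_assert(std::is_trivially_copyable<gemmlowp::int16x8_m128i>::value,
              "int16x8_m128i should stay trivially copyable");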
template <>
struct FixedPointRawTypeTraits<__m128i> {
typedef std::int32_t ScalarRawType;
@@ -58,7 +62,7 @@ inline __m128i BitAnd(__m128i a, __m128i b) {
template <>
inline int16x8_m128i BitAnd(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_and_si128(a.v, b.v));
+ return to_int16x8_m128i(_mm_and_si128(a.v, b.v));
}
template <>
@@ -68,7 +72,7 @@ inline __m128i BitOr(__m128i a, __m128i b) {
template <>
inline int16x8_m128i BitOr(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_or_si128(a.v, b.v));
+ return to_int16x8_m128i(_mm_or_si128(a.v, b.v));
}
template <>
@@ -78,7 +82,7 @@ inline __m128i BitXor(__m128i a, __m128i b) {
template <>
inline int16x8_m128i BitXor(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_xor_si128(a.v, b.v));
+ return to_int16x8_m128i(_mm_xor_si128(a.v, b.v));
}
template <>
@@ -88,7 +92,7 @@ inline __m128i BitNot(__m128i a) {
template <>
inline int16x8_m128i BitNot(int16x8_m128i a) {
- return int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1)));
+ return to_int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1)));
}
template <>
@@ -98,7 +102,7 @@ inline __m128i Add(__m128i a, __m128i b) {
template <>
inline int16x8_m128i Add(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_add_epi16(a.v, b.v));
+ return to_int16x8_m128i(_mm_add_epi16(a.v, b.v));
}
template <>
@@ -108,7 +112,7 @@ inline __m128i Mul(__m128i a, __m128i b) {
template <>
inline int16x8_m128i Mul(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_mullo_epi16(a.v, b.v));
+ return to_int16x8_m128i(_mm_mullo_epi16(a.v, b.v));
}
template <>
@@ -118,7 +122,7 @@ inline __m128i Sub(__m128i a, __m128i b) {
template <>
inline int16x8_m128i Sub(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_sub_epi16(a.v, b.v));
+ return to_int16x8_m128i(_mm_sub_epi16(a.v, b.v));
}
template <>
@@ -128,7 +132,7 @@ inline __m128i Neg(__m128i a) {
template <>
inline int16x8_m128i Neg(int16x8_m128i a) {
- return int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1)));
+ return to_int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1)));
}
template <>
@@ -138,7 +142,7 @@ inline __m128i ShiftLeft(__m128i a, int offset) {
template <>
inline int16x8_m128i ShiftLeft(int16x8_m128i a, int offset) {
- return int16x8_m128i(_mm_slli_epi16(a.v, offset));
+ return to_int16x8_m128i(_mm_slli_epi16(a.v, offset));
}
template <>
@@ -148,7 +152,7 @@ inline __m128i ShiftRight(__m128i a, int offset) {
template <>
inline int16x8_m128i ShiftRight(int16x8_m128i a, int offset) {
- return int16x8_m128i(_mm_srai_epi16(a.v, offset));
+ return to_int16x8_m128i(_mm_srai_epi16(a.v, offset));
}
template <>
@@ -164,7 +168,7 @@ inline int16x8_m128i SelectUsingMask(int16x8_m128i if_mask,
int16x8_m128i then_val,
int16x8_m128i else_val) {
// borrowed from Intel's arm_neon_sse.h header.
- return int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v));
+ return to_int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v));
}
template <>
@@ -174,7 +178,7 @@ inline __m128i MaskIfEqual(__m128i a, __m128i b) {
template <>
inline int16x8_m128i MaskIfEqual(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v));
+ return to_int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v));
}
template <>
@@ -194,7 +198,7 @@ inline __m128i MaskIfZero(__m128i a) {
template <>
inline int16x8_m128i MaskIfZero(int16x8_m128i a) {
- return MaskIfEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
+ return MaskIfEqual(a, to_int16x8_m128i(_mm_set1_epi16(0)));
}
template <>
@@ -204,7 +208,7 @@ inline __m128i MaskIfNonZero(__m128i a) {
template <>
inline int16x8_m128i MaskIfNonZero(int16x8_m128i a) {
- return MaskIfNotEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
+ return MaskIfNotEqual(a, to_int16x8_m128i(_mm_set1_epi16(0)));
}
template <>
@@ -214,7 +218,7 @@ inline __m128i MaskIfGreaterThan(__m128i a, __m128i b) {
template <>
inline int16x8_m128i MaskIfGreaterThan(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v));
+ return to_int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v));
}
template <>
@@ -224,7 +228,7 @@ inline __m128i MaskIfLessThan(__m128i a, __m128i b) {
template <>
inline int16x8_m128i MaskIfLessThan(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_cmplt_epi16(a.v, b.v));
+ return to_int16x8_m128i(_mm_cmplt_epi16(a.v, b.v));
}
template <>
@@ -310,7 +314,7 @@ inline int16x8_m128i RoundingHalfSum(int16x8_m128i a, int16x8_m128i b) {
__m128i b_unsigned = _mm_sub_epi16(b.v, constant_neg_32768);
__m128i avg_unsigned = _mm_avg_epu16(a_unsigned, b_unsigned);
__m128i avg = _mm_add_epi16(avg_unsigned, constant_neg_32768);
- return int16x8_m128i(avg);
+ return to_int16x8_m128i(avg);
}
template <>
@@ -360,7 +364,7 @@ inline int16x8_m128i SaturatingRoundingDoublingHighMul(int16x8_m128i a,
__m128i saturation_mask =
_mm_cmpeq_epi16(result_unsaturated, _mm_set1_epi16(0x8000));
__m128i result = _mm_xor_si128(result_unsaturated, saturation_mask);
- return int16x8_m128i(result);
+ return to_int16x8_m128i(result);
}
template <>
@@ -370,13 +374,13 @@ inline __m128i Dup<__m128i>(std::int32_t x) {
template <>
inline int16x8_m128i Dup<int16x8_m128i>(std::int16_t x) {
- return int16x8_m128i(_mm_set1_epi16(x));
+ return to_int16x8_m128i(_mm_set1_epi16(x));
}
// So far this is only needed for int16.
template <>
inline int16x8_m128i SaturatingAdd(int16x8_m128i a, int16x8_m128i b) {
- return int16x8_m128i(_mm_adds_epi16(a.v, b.v));
+ return to_int16x8_m128i(_mm_adds_epi16(a.v, b.v));
}
} // end namespace gemmlowp
diff --git a/fixedpoint/fixedpoint_wasmsimd.h b/fixedpoint/fixedpoint_wasmsimd.h
new file mode 100644
index 0000000..868fbfe
--- /dev/null
+++ b/fixedpoint/fixedpoint_wasmsimd.h
@@ -0,0 +1,381 @@
+// Copyright 2020 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// fixedpoint_wasmsimd.h: optimized WAsm SIMD specializations of the templates
+// in fixedpoint.h.
+
+#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_WASMSIMD_H_
+#define GEMMLOWP_INTERNAL_FIXEDPOINT_WASMSIMD_H_
+
+#include <wasm_simd128.h>
+
+namespace gemmlowp {
+
+// WAsm SIMD intrinsics are not typed: there is a single v128_t vector
+// type that does not distinguish between "int32x4" and "int16x8" use
+// cases, unlike the NEON equivalents. Because we had initially focused
+// on int32x4, we did not pay attention and specialized these fixedpoint
+// templates directly for v128_t hardcoding the int32x4 semantics,
+// not leaving room for int16x8 semantics. Amending that by adding a separate
+// data type, int16x8_v128_t, that wraps v128_t while being a separate
+// type.
+struct int16x8_v128_t {
+ v128_t v;
+};
+
+// Keep int16x8_v128_t trivially constructible/destructible and provide
+// easily optimized helper function.
+inline int16x8_v128_t to_int16x8_v128_t(v128_t w) {
+ int16x8_v128_t r;
+ r.v = w;
+ return r;
+}
+
+template <>
+struct FixedPointRawTypeTraits<v128_t> {
+ typedef std::int32_t ScalarRawType;
+ static constexpr int kLanes = 4;
+};
+
+template <>
+struct FixedPointRawTypeTraits<int16x8_v128_t> {
+ typedef std::int16_t ScalarRawType;
+ static constexpr int kLanes = 8;
+};
+
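Illustrative usage sketch (assuming a WebAssembly SIMD build in which fixedpoint.h selects this header, i.e. GEMMLOWP_WASMSIMD defined by gemmlowp's platform detection outside this diff): the same v128_t register can back either fixed-point type, and the wrapper type is what keeps the int16x8 overloads from colliding with the int32x4 ones.

#include "fixedpoint/fixedpoint.h"

// 4 lanes of Q0.31 on the raw vector type, 8 lanes of Q0.15 on the wrapper.
using F0_Int32x4 = gemmlowp::FixedPoint<v128_t, 0>;
using F0_Int16x8 = gemmlowp::FixedPoint<gemmlowp::int16x8_v128_t, 0>;

// Example: run the fixedpoint.h logistic() on 8 int16 lanes holding Q3.12
// inputs (3 integer bits chosen arbitrarily for this sketch).
inline gemmlowp::int16x8_v128_t LogisticQ3_12(gemmlowp::int16x8_v128_t x_raw) {
  const auto x =
      gemmlowp::FixedPoint<gemmlowp::int16x8_v128_t, 3>::FromRaw(x_raw);
  return gemmlowp::logistic(x).raw();
}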
+template <>
+inline v128_t BitAnd(v128_t a, v128_t b) {
+ return wasm_v128_and(a, b);
+}
+
+template <>
+inline int16x8_v128_t BitAnd(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_v128_and(a.v, b.v));
+}
+
+template <>
+inline v128_t BitOr(v128_t a, v128_t b) {
+ return wasm_v128_or(a, b);
+}
+
+template <>
+inline int16x8_v128_t BitOr(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_v128_or(a.v, b.v));
+}
+
+template <>
+inline v128_t BitXor(v128_t a, v128_t b) {
+ return wasm_v128_xor(a, b);
+}
+
+template <>
+inline int16x8_v128_t BitXor(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_v128_xor(a.v, b.v));
+}
+
+template <>
+inline v128_t BitNot(v128_t a) {
+ return wasm_v128_not(a);
+}
+
+template <>
+inline int16x8_v128_t BitNot(int16x8_v128_t a) {
+ return to_int16x8_v128_t(wasm_v128_not(a.v));
+}
+
+template <>
+inline v128_t Add(v128_t a, v128_t b) {
+ return wasm_i32x4_add(a, b);
+}
+
+template <>
+inline int16x8_v128_t Add(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_add(a.v, b.v));
+}
+
+template <>
+inline v128_t Mul(v128_t a, v128_t b) {
+ return wasm_i32x4_mul(a, b);
+}
+
+template <>
+inline int16x8_v128_t Mul(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_mul(a.v, b.v));
+}
+
+template <>
+inline v128_t Sub(v128_t a, v128_t b) {
+ return wasm_i32x4_sub(a, b);
+}
+
+template <>
+inline int16x8_v128_t Sub(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_sub(a.v, b.v));
+}
+
+template <>
+inline v128_t Neg(v128_t a) {
+ return wasm_i32x4_neg(a);
+}
+
+template <>
+inline int16x8_v128_t Neg(int16x8_v128_t a) {
+ return to_int16x8_v128_t(wasm_i16x8_neg(a.v));
+}
+
+template <>
+inline v128_t ShiftLeft(v128_t a, int offset) {
+ return wasm_i32x4_shl(a, offset);
+}
+
+template <>
+inline int16x8_v128_t ShiftLeft(int16x8_v128_t a, int offset) {
+ return to_int16x8_v128_t(wasm_i16x8_shl(a.v, offset));
+}
+
+template <>
+inline v128_t ShiftRight(v128_t a, int offset) {
+ return wasm_i32x4_shr(a, offset);
+}
+
+template <>
+inline int16x8_v128_t ShiftRight(int16x8_v128_t a, int offset) {
+ return to_int16x8_v128_t(wasm_i16x8_shr(a.v, offset));
+}
+
+template <>
+inline v128_t SelectUsingMask(v128_t if_mask, v128_t then_val,
+ v128_t else_val) {
+ return wasm_v128_bitselect(then_val, else_val, if_mask);
+}
+
+template <>
+inline int16x8_v128_t SelectUsingMask(int16x8_v128_t if_mask,
+ int16x8_v128_t then_val,
+ int16x8_v128_t else_val) {
+ return to_int16x8_v128_t(
+ wasm_v128_bitselect(then_val.v, else_val.v, if_mask.v));
+}
+
+template <>
+inline v128_t MaskIfEqual(v128_t a, v128_t b) {
+ return wasm_i32x4_eq(a, b);
+}
+
+template <>
+inline int16x8_v128_t MaskIfEqual(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_eq(a.v, b.v));
+}
+
+template <>
+inline v128_t MaskIfNotEqual(v128_t a, v128_t b) {
+ return wasm_i32x4_ne(a, b);
+}
+
+template <>
+inline int16x8_v128_t MaskIfNotEqual(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_ne(a.v, b.v));
+}
+
+template <>
+inline v128_t MaskIfZero(v128_t a) {
+ return MaskIfEqual(a, wasm_i32x4_const(0, 0, 0, 0));
+}
+
+template <>
+inline int16x8_v128_t MaskIfZero(int16x8_v128_t a) {
+ return MaskIfEqual(
+ a, to_int16x8_v128_t(wasm_i16x8_const(0, 0, 0, 0, 0, 0, 0, 0)));
+}
+
+template <>
+inline v128_t MaskIfNonZero(v128_t a) {
+ return MaskIfNotEqual(a, wasm_i32x4_const(0, 0, 0, 0));
+}
+
+template <>
+inline int16x8_v128_t MaskIfNonZero(int16x8_v128_t a) {
+ return MaskIfNotEqual(
+ a, to_int16x8_v128_t(wasm_i16x8_const(0, 0, 0, 0, 0, 0, 0, 0)));
+}
+
+template <>
+inline v128_t MaskIfGreaterThan(v128_t a, v128_t b) {
+ return wasm_i32x4_gt(a, b);
+}
+
+template <>
+inline int16x8_v128_t MaskIfGreaterThan(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_gt(a.v, b.v));
+}
+
+template <>
+inline v128_t MaskIfLessThan(v128_t a, v128_t b) {
+ return wasm_i32x4_lt(a, b);
+}
+
+template <>
+inline int16x8_v128_t MaskIfLessThan(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_lt(a.v, b.v));
+}
+
+template <>
+inline v128_t MaskIfGreaterThanOrEqual(v128_t a, v128_t b) {
+ return wasm_i32x4_ge(a, b);
+}
+
+template <>
+inline int16x8_v128_t MaskIfGreaterThanOrEqual(int16x8_v128_t a,
+ int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_ge(a.v, b.v));
+}
+
+template <>
+inline v128_t MaskIfLessThanOrEqual(v128_t a, v128_t b) {
+ return wasm_i32x4_le(a, b);
+}
+
+template <>
+inline int16x8_v128_t MaskIfLessThanOrEqual(int16x8_v128_t a,
+ int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_le(a.v, b.v));
+}
+
+/* Assumptions:
+ - All and Any are used on masks.
+ - masks are all_ones for true lanes, all_zeroes otherwise.
+Hence, All means all 128bits set, and Any means any bit set.
+*/
+
+template <>
+inline bool All(v128_t a) {
+ return wasm_i32x4_all_true(a);
+}
+
+template <>
+inline bool All(int16x8_v128_t a) {
+ return wasm_i16x8_all_true(a.v);
+}
+
+template <>
+inline bool Any(v128_t a) {
+ return wasm_i32x4_any_true(a);
+}
+
+template <>
+inline bool Any(int16x8_v128_t a) {
+ return wasm_i16x8_any_true(a.v);
+}
+
+template <>
+inline v128_t RoundingHalfSum(v128_t a, v128_t b) {
+  // Compute the (wrapping) rounded half-sum directly, then fix up the lanes
+  // where the 32-bit addition overflowed: that happens exactly when a and b
+  // agree in sign while the computed half-sum does not, and flipping the
+  // half-sum's sign bit in those lanes restores the correct value.
+ const v128_t one = wasm_i32x4_const(1, 1, 1, 1);
+ const v128_t sign_bit_mask =
+ wasm_i32x4_const(0x80000000, 0x80000000, 0x80000000, 0x80000000);
+ const v128_t sum = Add(a, b);
+ const v128_t rounded_half_sum = ShiftRight(Add(sum, one), 1);
+ const v128_t overflow =
+ BitAnd(BitAnd(BitXor(a, rounded_half_sum), BitXor(b, rounded_half_sum)),
+ sign_bit_mask);
+ const v128_t result = BitXor(rounded_half_sum, overflow);
+ return result;
+}
+
+template <>
+inline int16x8_v128_t RoundingHalfSum(int16x8_v128_t a, int16x8_v128_t b) {
+ // Idea: go to unsigned to use wasm_u16x8_avgr,
+ // borrowed from Intel's arm_neon_sse.h header.
+ const v128_t constant_neg_32768 = wasm_i16x8_const(
+ -32768, -32768, -32768, -32768, -32768, -32768, -32768, -32768);
+ const v128_t a_unsigned = wasm_v128_xor(a.v, constant_neg_32768);
+ const v128_t b_unsigned = wasm_v128_xor(b.v, constant_neg_32768);
+ const v128_t avg_unsigned = wasm_u16x8_avgr(a_unsigned, b_unsigned);
+ const v128_t avg = wasm_v128_xor(avg_unsigned, constant_neg_32768);
+ return to_int16x8_v128_t(avg);
+}
+
+template <>
+inline v128_t SaturatingRoundingDoublingHighMul(v128_t a, v128_t b) {
+ // TODO: switch to extended multiplication once implemented in the toolchain
+ const v128_t a_sign = wasm_i32x4_shr(a, 31);
+ const v128_t b_sign = wasm_i32x4_shr(b, 31);
+
+ const v128_t a_ext_lo = wasm_v32x4_shuffle(a, a_sign, 0, 4, 1, 5);
+ const v128_t a_ext_hi = wasm_v32x4_shuffle(a, a_sign, 2, 6, 3, 7);
+ const v128_t b_ext_lo = wasm_v32x4_shuffle(b, b_sign, 0, 4, 1, 5);
+ const v128_t b_ext_hi = wasm_v32x4_shuffle(b, b_sign, 2, 6, 3, 7);
+
+ const v128_t ab_lo = wasm_i64x2_mul(a_ext_lo, b_ext_lo);
+ const v128_t ab_hi = wasm_i64x2_mul(a_ext_hi, b_ext_hi);
+
+ const v128_t nudge_2x =
+ wasm_i64x2_const(INT64_C(0x80000000), INT64_C(0x80000000));
+ const v128_t ab_lo_2x = wasm_i64x2_add(ab_lo, ab_lo);
+ const v128_t ab_hi_2x = wasm_i64x2_add(ab_hi, ab_hi);
+
+ const v128_t ab_lo_rounded_2x = wasm_i64x2_add(ab_lo_2x, nudge_2x);
+ const v128_t ab_hi_rounded_2x = wasm_i64x2_add(ab_hi_2x, nudge_2x);
+
+ const v128_t prod =
+ wasm_v32x4_shuffle(ab_lo_rounded_2x, ab_hi_rounded_2x, 1, 3, 5, 7);
+
+  // Saturation only happens if a == b == INT_MIN, and this is the only case
+ // where prod == INT_MIN (0x80000000) instead of INT_MAX (0x7FFFFFFF).
+ const v128_t min = wasm_i32x4_const(INT32_C(0x80000000), INT32_C(0x80000000),
+ INT32_C(0x80000000), INT32_C(0x80000000));
+
+ return wasm_v128_xor(prod, wasm_i32x4_eq(prod, min));
+}
+
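The shuffles interleave each 32-bit lane with its sign word to form sign-extended 64-bit operands, and the final shuffle keeps the upper 32 bits of each rounded, doubled product. Per lane this is equivalent to the following scalar sketch (not part of the patch):

#include <cstdint>
#include <limits>

inline std::int32_t ScalarSaturatingRoundingDoublingHighMulInt32(std::int32_t a,
                                                                 std::int32_t b) {
  if (a == std::numeric_limits<std::int32_t>::min() &&
      b == std::numeric_limits<std::int32_t>::min()) {
    return std::numeric_limits<std::int32_t>::max();  // the lone saturating case
  }
  const std::int64_t ab_2x = 2 * (static_cast<std::int64_t>(a) * b);
  const std::int64_t rounded_2x = ab_2x + (INT64_C(1) << 31);  // the nudge_2x above
  return static_cast<std::int32_t>(rounded_2x >> 32);          // high 32 bits
}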
+template <>
+inline int16x8_v128_t SaturatingRoundingDoublingHighMul(int16x8_v128_t a,
+ int16x8_v128_t b) {
+#if 0
+ // TODO: enable if https://github.com/WebAssembly/simd/pull/365 is accepted
+ return to_int16x8_v128_t(__builtin_wasm_q15mulr_saturate_s_i16x8(a.v, b.v));
+#else
+ // TODO: switch to extended multiplication once implemented in the toolchain
+ v128_t lo = wasm_i32x4_mul(wasm_i32x4_widen_low_i16x8(a.v),
+ wasm_i32x4_widen_low_i16x8(b.v));
+ v128_t hi = wasm_i32x4_mul(wasm_i32x4_widen_high_i16x8(a.v),
+ wasm_i32x4_widen_high_i16x8(b.v));
+ const v128_t inc = wasm_i32x4_const(0x4000, 0x4000, 0x4000, 0x4000);
+ lo = wasm_i32x4_add(lo, inc);
+ hi = wasm_i32x4_add(hi, inc);
+ lo = wasm_i32x4_shr(lo, 15);
+ hi = wasm_i32x4_shr(hi, 15);
+ return to_int16x8_v128_t(wasm_i16x8_narrow_i32x4(lo, hi));
+#endif
+}
+
+template <>
+inline v128_t Dup<v128_t>(std::int32_t x) {
+ return wasm_i32x4_splat(x);
+}
+
+template <>
+inline int16x8_v128_t Dup<int16x8_v128_t>(std::int16_t x) {
+ return to_int16x8_v128_t(wasm_i16x8_splat(x));
+}
+
+// So far this is only needed for int16.
+template <>
+inline int16x8_v128_t SaturatingAdd(int16x8_v128_t a, int16x8_v128_t b) {
+ return to_int16x8_v128_t(wasm_i16x8_add_saturate(a.v, b.v));
+}
+
+} // end namespace gemmlowp
+
+#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_WASMSIMD_H_