author     Marat Dukhan <maratek@google.com>                2020-09-08 23:57:14 -0700
committer  XNNPACK Team <xnnpack-github-robot@google.com>   2020-09-08 23:57:52 -0700
commit     e6dc0b6df1cabbb370650927e810511877813b8a (patch)
tree       ae0c472b4fc044a7b511b41f0989a3851048d803
parent     bb9225e24044023b0cc0ae14c8b53ff3f0d94419 (diff)
download   XNNPACK-e6dc0b6df1cabbb370650927e810511877813b8a.tar.gz
AVX2 versions of QS8 VADD[C] microkernels
PiperOrigin-RevId: 330666026
-rw-r--r--  BUILD.bazel                                     |    8
-rwxr-xr-x  CMakeLists.txt                                  |    8
-rwxr-xr-x  scripts/generate-qs8-vadd.sh                    |   11
-rw-r--r--  src/qs8-vadd/avx2-mul32-ld64.c.in               |  154
-rw-r--r--  src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x16.c   |  105
-rw-r--r--  src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x24.c   |  115
-rw-r--r--  src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x32.c   |  121
-rw-r--r--  src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x8.c    |   90
-rw-r--r--  src/qs8-vaddc/avx2-mul32-ld64.c.in              |  145
-rw-r--r--  src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c  |   96
-rw-r--r--  src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c  |  104
-rw-r--r--  src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c  |  108
-rw-r--r--  src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c   |   84
-rw-r--r--  src/xnnpack/vadd.h                              |   10
-rw-r--r--  test/qs8-vadd-minmax.cc                         |  640
-rw-r--r--  test/qs8-vadd-minmax.yaml                       |    4
-rw-r--r--  test/qs8-vaddc-minmax.cc                        |  556
-rw-r--r--  test/qs8-vaddc-minmax.yaml                      |    4
18 files changed, 2363 insertions(+), 0 deletions(-)
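The new kernels reuse the params->sse2 parameter layout of the existing SSE4/XOP QS8 VADD kernels and widen the arithmetic to 256-bit lanes. For reference, a minimal scalar sketch of the per-element computation that the AVX2 kernels below vectorize; parameter names follow the params->sse2 fields used in the diff, the saturating 16-bit pack/add steps are folded into the final clamp, an arithmetic right shift is assumed for signed values, and this is an illustration only, not the shipped scalar kernel:

#include <stdint.h>

// Minimal scalar sketch of the per-element computation the AVX2 kernels in
// this commit vectorize (illustrative; names mirror the params->sse2 fields).
static inline int8_t qs8_add_reference(
    int8_t x, int8_t y,
    int32_t zero_point_product, int32_t x_multiplier, int32_t y_multiplier,
    uint32_t shift, int32_t remainder_mask, int32_t remainder_threshold,
    int32_t output_zero_point, int32_t output_min, int32_t output_max)
{
  // Widen to 32 bits, scale both inputs, and add the precomputed bias.
  int32_t acc = zero_point_product
              + (int32_t) x * x_multiplier
              + (int32_t) y * y_multiplier;
  // Rounding right shift (round to nearest, ties away from zero), mirroring
  // the vremainder_mask / vremainder_threshold sequence in the intrinsics.
  const int32_t rem = (acc & remainder_mask) + (acc >> 31);
  acc = (acc >> shift) + (int32_t) (rem > remainder_threshold);
  // Re-center on the output zero point and clamp to the quantized range.
  acc += output_zero_point;
  acc = acc < output_min ? output_min : acc;
  acc = acc > output_max ? output_max : acc;
  return (int8_t) acc;
}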
diff --git a/BUILD.bazel b/BUILD.bazel
index fb0d2de0c..70a88107c 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -2418,6 +2418,14 @@ AVX2_UKERNELS = [
"src/qs8-igemm/gen/1x8c8-minmax-avx2.c",
"src/qs8-igemm/gen/2x8c8-minmax-avx2.c",
"src/qs8-igemm/gen/3x8c8-minmax-avx2.c",
+ "src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x8.c",
+ "src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x16.c",
+ "src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x24.c",
+ "src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x32.c",
+ "src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c",
+ "src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c",
+ "src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c",
+ "src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c",
"src/math/exp-avx2-p5.c",
"src/math/exp-avx2-perm-p3.c",
"src/math/exp-avx2-perm-p4.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ba6e355bf..0235c0fb9 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1897,6 +1897,14 @@ SET(XNNPACK_AVX2_MICROKERNEL_SRCS
src/qs8-igemm/gen/1x8c8-minmax-avx2.c
src/qs8-igemm/gen/2x8c8-minmax-avx2.c
src/qs8-igemm/gen/3x8c8-minmax-avx2.c
+ src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x8.c
+ src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x16.c
+ src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x24.c
+ src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x32.c
+ src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c
+ src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c
+ src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c
+ src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c
src/math/exp-avx2-p5.c
src/math/exp-avx2-perm-p3.c
src/math/exp-avx2-perm-p4.c
diff --git a/scripts/generate-qs8-vadd.sh b/scripts/generate-qs8-vadd.sh
index da86632df..59bb3afae 100755
--- a/scripts/generate-qs8-vadd.sh
+++ b/scripts/generate-qs8-vadd.sh
@@ -67,6 +67,17 @@ tools/xngen src/qs8-vaddc/sse-mul32-ld32.c.in -D BATCH_TILE=16 -D SSE=5 -o src/q
tools/xngen src/qs8-vaddc/sse-mul32-ld32.c.in -D BATCH_TILE=24 -D SSE=5 -o src/qs8-vaddc/gen/minmax-xop-mul32-ld32-x24.c
tools/xngen src/qs8-vaddc/sse-mul32-ld32.c.in -D BATCH_TILE=32 -D SSE=5 -o src/qs8-vaddc/gen/minmax-xop-mul32-ld32-x32.c
+################################### x86 AVX2 ##################################
+tools/xngen src/qs8-vadd/avx2-mul32-ld64.c.in -D BATCH_TILE=8 -o src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x8.c
+tools/xngen src/qs8-vadd/avx2-mul32-ld64.c.in -D BATCH_TILE=16 -o src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x16.c
+tools/xngen src/qs8-vadd/avx2-mul32-ld64.c.in -D BATCH_TILE=24 -o src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x24.c
+tools/xngen src/qs8-vadd/avx2-mul32-ld64.c.in -D BATCH_TILE=32 -o src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x32.c
+
+tools/xngen src/qs8-vaddc/avx2-mul32-ld64.c.in -D BATCH_TILE=8 -o src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c
+tools/xngen src/qs8-vaddc/avx2-mul32-ld64.c.in -D BATCH_TILE=16 -o src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c
+tools/xngen src/qs8-vaddc/avx2-mul32-ld64.c.in -D BATCH_TILE=24 -o src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c
+tools/xngen src/qs8-vaddc/avx2-mul32-ld64.c.in -D BATCH_TILE=32 -o src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c
+
################################## Unit tests #################################
tools/generate-vbinary-test.py --tester VAddMicrokernelTester --spec test/qs8-vadd-minmax.yaml --output test/qs8-vadd-minmax.cc
tools/generate-vbinary-test.py --tester VAddCMicrokernelTester --spec test/qs8-vaddc-minmax.yaml --output test/qs8-vaddc-minmax.cc
diff --git a/src/qs8-vadd/avx2-mul32-ld64.c.in b/src/qs8-vadd/avx2-mul32-ld64.c.in
new file mode 100644
index 000000000..ca320538c
--- /dev/null
+++ b/src/qs8-vadd/avx2-mul32-ld64.c.in
@@ -0,0 +1,154 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x${BATCH_TILE}(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.zero_point_product));
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vy_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.y_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ $if BATCH_TILE > 8:
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+ $else:
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+ for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+ const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ $for N in range(8, BATCH_TILE, 8):
+ const __m256i vx${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + ${N})));
+ const __m256i vy${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + ${N})));
+ input_x += ${BATCH_TILE};
+ input_y += ${BATCH_TILE};
+
+ $for N in range(0, BATCH_TILE, 8):
+ __m256i vacc${ABC[N:N+8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[N:N+8]}, vx_multiplier));
+
+ $for N in range(0, BATCH_TILE, 8):
+ vacc${ABC[N:N+8]} = _mm256_add_epi32(vacc${ABC[N:N+8]}, _mm256_mullo_epi32(vy${ABC[N:N+8]}, vy_multiplier));
+
+ $for N in range(0, BATCH_TILE, 8):
+ const __m256i vrem${ABC[N:N+8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[N:N+8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[N:N+8]}, 31));
+
+ $for N in range(0, BATCH_TILE, 8):
+ vacc${ABC[N:N+8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[N:N+8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[N:N+8]}, vremainder_threshold));
+
+ $for N in range(0, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ __m256i vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_adds_epi16(_mm256_packs_epi32(vacc${ABC[N:N+8]}, vacc${ABC[N+8:N+16]}), voutput_zero_point);
+ $elif BATCH_TILE > 8:
+ __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ $else:
+ __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), voutput_zero_point);
+
+ $for N in range(0, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_min_epi16(_mm256_max_epi16(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, voutput_min), voutput_max);
+ $elif BATCH_TILE > 8:
+ vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ $else:
+ vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, voutput_min), voutput_max);
+
+ $for N in range(0, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ __m128i vout${ABC[N:N+16]} = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}), _mm256_extracti128_si256(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ $else:
+ __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
+
+ $if BATCH_TILE >= 16:
+ _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
+ $else:
+ _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+ $for N in range(16, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
+ $else:
+ _mm_storel_epi64((__m128i*) (output + ${N}), vout${ABC[N:N+8]}${ABC[N:N+8]});
+ output += ${BATCH_TILE};
+ }
+ if XNN_UNLIKELY(n != 0) {
+ ${"do " if BATCH_TILE > 8 else ""}{
+ const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ $if BATCH_TILE > 8:
+ input_x += 8;
+ input_y += 8;
+
+ __m256i vacc${ABC[0:8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[0:8]}, vx_multiplier));
+
+ vacc${ABC[0:8]} = _mm256_add_epi32(vacc${ABC[0:8]}, _mm256_mullo_epi32(vy${ABC[0:8]}, vy_multiplier));
+
+ const __m256i vrem${ABC[0:8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[0:8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[0:8]}, 31));
+
+ vacc${ABC[0:8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[0:8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[0:8]}, vremainder_threshold));
+
+ $if BATCH_TILE > 8:
+ __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ $else:
+ __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), voutput_zero_point);
+ vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);
+ __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
+
+ $if BATCH_TILE > 8:
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ }
+ n = 0;
+ }
+ $else:
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ }
+ }${" while (n != 0);" if BATCH_TILE > 8 else ""}
+ }
+}
diff --git a/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x16.c b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x16.c
new file mode 100644
index 000000000..a5ef37980
--- /dev/null
+++ b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x16.c
@@ -0,0 +1,105 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vadd/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.zero_point_product));
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vy_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.y_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+ for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+ const __m256i vy89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 8)));
+ input_x += 16;
+ input_y += 16;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+ __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vy89ABCDEF, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+ const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+
+ vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ do {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ input_x += 8;
+ input_y += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ n = 0;
+ }
+ } while (n != 0);
+ }
+}
diff --git a/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x24.c b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x24.c
new file mode 100644
index 000000000..95ed64886
--- /dev/null
+++ b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x24.c
@@ -0,0 +1,115 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vadd/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.zero_point_product));
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vy_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.y_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+ for (; n >= 24 * sizeof(int8_t); n -= 24 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+ const __m256i vy89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 8)));
+ const __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 16)));
+ const __m256i vyGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 16)));
+ input_x += 24;
+ input_y += 24;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+ __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+ __m256i vaccGHIJKLMN = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxGHIJKLMN, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vy89ABCDEF, vy_multiplier));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vyGHIJKLMN, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+ const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+ const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm256_srai_epi32(vaccGHIJKLMN, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+ vaccGHIJKLMN = _mm256_sub_epi32(_mm256_sra_epi32(vaccGHIJKLMN, vshift), _mm256_cmpgt_epi32(vremGHIJKLMN, vremainder_threshold));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extracti128_si256(vaccGHIJKLMN, 1)), _mm256_castsi256_si128(voutput_zero_point));
+
+ vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+ voutGHIJKLMN = _mm_min_epi16(_mm_max_epi16(voutGHIJKLMN, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ do {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ input_x += 8;
+ input_y += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ n = 0;
+ }
+ } while (n != 0);
+ }
+}
diff --git a/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x32.c b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x32.c
new file mode 100644
index 000000000..19b3b4b0c
--- /dev/null
+++ b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x32.c
@@ -0,0 +1,121 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vadd/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.zero_point_product));
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vy_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.y_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+ for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+ const __m256i vy89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 8)));
+ const __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 16)));
+ const __m256i vyGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 16)));
+ const __m256i vxOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 24)));
+ const __m256i vyOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_y + 24)));
+ input_x += 32;
+ input_y += 32;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+ __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+ __m256i vaccGHIJKLMN = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxGHIJKLMN, vx_multiplier));
+ __m256i vaccOPQRSTUV = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxOPQRSTUV, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vy89ABCDEF, vy_multiplier));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vyGHIJKLMN, vy_multiplier));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vyOPQRSTUV, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+ const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+ const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm256_srai_epi32(vaccGHIJKLMN, 31));
+ const __m256i vremOPQRSTUV = _mm256_add_epi32(_mm256_and_si256(vaccOPQRSTUV, vremainder_mask), _mm256_srai_epi32(vaccOPQRSTUV, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+ vaccGHIJKLMN = _mm256_sub_epi32(_mm256_sra_epi32(vaccGHIJKLMN, vshift), _mm256_cmpgt_epi32(vremGHIJKLMN, vremainder_threshold));
+ vaccOPQRSTUV = _mm256_sub_epi32(_mm256_sra_epi32(vaccOPQRSTUV, vshift), _mm256_cmpgt_epi32(vremOPQRSTUV, vremainder_threshold));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);
+
+ vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+ voutGHIJOPQRKLMNSTUV = _mm256_min_epi16(_mm256_max_epi16(voutGHIJOPQRKLMNSTUV, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ do {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ input_x += 8;
+ input_y += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ n = 0;
+ }
+ } while (n != 0);
+ }
+}
diff --git a/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x8.c b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x8.c
new file mode 100644
index 000000000..b26c7bf30
--- /dev/null
+++ b/src/qs8-vadd/gen/minmax-avx2-mul32-ld64-x8.c
@@ -0,0 +1,90 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vadd/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.zero_point_product));
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vy_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.y_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+ for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+ input_x += 8;
+ input_y += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vy01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_y));
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vy01234567, vy_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ }
+ }
+}
diff --git a/src/qs8-vaddc/avx2-mul32-ld64.c.in b/src/qs8-vaddc/avx2-mul32-ld64.c.in
new file mode 100644
index 000000000..6b29b59eb
--- /dev/null
+++ b/src/qs8-vaddc/avx2-mul32-ld64.c.in
@@ -0,0 +1,145 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x${BATCH_TILE}(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ $if BATCH_TILE > 8:
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+ $else:
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+ __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+ _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+ _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+ for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+ const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ $for N in range(8, BATCH_TILE, 8):
+ const __m256i vx${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + ${N})));
+ input_x += ${BATCH_TILE};
+
+ $for N in range(0, BATCH_TILE, 8):
+ __m256i vacc${ABC[N:N+8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[N:N+8]}, vx_multiplier));
+
+ $for N in range(0, BATCH_TILE, 8):
+ const __m256i vrem${ABC[N:N+8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[N:N+8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[N:N+8]}, 31));
+
+ $for N in range(0, BATCH_TILE, 8):
+ vacc${ABC[N:N+8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[N:N+8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[N:N+8]}, vremainder_threshold));
+
+ $for N in range(0, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ __m256i vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_adds_epi16(_mm256_packs_epi32(vacc${ABC[N:N+8]}, vacc${ABC[N+8:N+16]}), voutput_zero_point);
+ $elif BATCH_TILE > 8:
+ __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ $else:
+ __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), voutput_zero_point);
+
+ $for N in range(0, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_min_epi16(_mm256_max_epi16(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, voutput_min), voutput_max);
+ $elif BATCH_TILE > 8:
+ vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ $else:
+ vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, voutput_min), voutput_max);
+
+ $for N in range(0, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ __m128i vout${ABC[N:N+16]} = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}), _mm256_extracti128_si256(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ $else:
+ __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
+
+ $if BATCH_TILE >= 16:
+ _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
+ $else:
+ _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+ $for N in range(16, BATCH_TILE, 16):
+ $if N + 8 < BATCH_TILE:
+ _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
+ $else:
+ _mm_storel_epi64((__m128i*) (output + ${N}), vout${ABC[N:N+8]}${ABC[N:N+8]});
+ output += ${BATCH_TILE};
+ }
+ if XNN_UNLIKELY(n != 0) {
+ ${"do " if BATCH_TILE > 8 else ""}{
+ const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ $if BATCH_TILE > 8:
+ input_x += 8;
+
+ __m256i vacc${ABC[0:8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[0:8]}, vx_multiplier));
+
+ const __m256i vrem${ABC[0:8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[0:8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[0:8]}, 31));
+
+ vacc${ABC[0:8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[0:8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[0:8]}, vremainder_threshold));
+
+ $if BATCH_TILE > 8:
+ __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ $else:
+ __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), voutput_zero_point);
+ vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);
+ __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
+
+ $if BATCH_TILE > 8:
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ }
+ n = 0;
+ }
+ $else:
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+ }
+ }${" while (n != 0);" if BATCH_TILE > 8 else ""}
+ }
+}
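The VADDC ("add a broadcast constant") template above differs from the VADD template only in the bias setup: the single y value is scaled and folded into the zero-point product once before the loop, so each element inside the loop needs just one multiply and one add. A hedged scalar sketch of that pre-loop folding (the [0] indexing into the replicated params lanes is illustrative):

// Scalar equivalent of the _mm_broadcastd_epi32 / _mm_add_epi32 setup above
// (illustrative; the real fields are replicated SIMD lanes).
const int32_t bias = params->sse2.zero_point_product[0]
                   + params->sse2.y_multiplier[0] * (int32_t) *input_y;
// Per element inside the loop: acc = bias + (int32_t) x * x_multiplier, then
// the same rounding shift and clamp as in the VADD kernels.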
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c
new file mode 100644
index 000000000..94cf51a03
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c
@@ -0,0 +1,96 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+ __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+ _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+ _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+ for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+ input_x += 16;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+ __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+ const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+
+ vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ do {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ input_x += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ n = 0;
+ }
+ } while (n != 0);
+ }
+}
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c
new file mode 100644
index 000000000..729963e8e
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c
@@ -0,0 +1,104 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+ __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+ _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+ _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+ for (; n >= 24 * sizeof(int8_t); n -= 24 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+ const __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 16)));
+ input_x += 24;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+ __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+ __m256i vaccGHIJKLMN = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxGHIJKLMN, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+ const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+ const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm256_srai_epi32(vaccGHIJKLMN, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+ vaccGHIJKLMN = _mm256_sub_epi32(_mm256_sra_epi32(vaccGHIJKLMN, vshift), _mm256_cmpgt_epi32(vremGHIJKLMN, vremainder_threshold));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extracti128_si256(vaccGHIJKLMN, 1)), _mm256_castsi256_si128(voutput_zero_point));
+
+ vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+ voutGHIJKLMN = _mm_min_epi16(_mm_max_epi16(voutGHIJKLMN, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ do {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ input_x += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ n = 0;
+ }
+ } while (n != 0);
+ }
+}
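Note on the oddly ordered intermediate names above (e.g. vout012389AB4567CDEF): _mm256_packs_epi32 packs each 128-bit lane independently, so the narrowed 16-bit values land as 0 1 2 3 8 9 A B in the low lane and 4 5 6 7 C D E F in the high lane; the trailing _mm_shuffle_epi32 with _MM_SHUFFLE(3, 1, 2, 0) swaps the two middle 32-bit groups back into sequential order before the bytes are stored. The following standalone demo is an illustrative sketch only (not part of this commit; it assumes a compiler with AVX2 enabled, e.g. -mavx2):

#include <stdio.h>
#include <immintrin.h>

int main(void) {
  // Pack two vectors holding the 32-bit indices 0..7 and 8..15 down to 16 bits.
  const __m256i lo = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
  const __m256i hi = _mm256_setr_epi32(8, 9, 10, 11, 12, 13, 14, 15);
  const __m256i packed = _mm256_packs_epi32(lo, hi);

  int16_t v[16];
  _mm256_storeu_si256((__m256i*) v, packed);
  for (int i = 0; i < 16; i++) {
    printf("%d ", v[i]);  // prints: 0 1 2 3 8 9 10 11 4 5 6 7 12 13 14 15
  }
  printf("\n");
  return 0;
}

Packing those two 16-bit halves down to 8 bits and then permuting the 32-bit groups into the order 0, 2, 1, 3 (which is what _MM_SHUFFLE(3, 1, 2, 0) selects) restores the sequential 0..F byte order written to output.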
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c
new file mode 100644
index 000000000..b2dab8f1f
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c
@@ -0,0 +1,108 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+ const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+ const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+ __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+ _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+ _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+ for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+ const __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 16)));
+ const __m256i vxOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 24)));
+ input_x += 32;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+ __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+ __m256i vaccGHIJKLMN = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxGHIJKLMN, vx_multiplier));
+ __m256i vaccOPQRSTUV = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxOPQRSTUV, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+ const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+ const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm256_srai_epi32(vaccGHIJKLMN, 31));
+ const __m256i vremOPQRSTUV = _mm256_add_epi32(_mm256_and_si256(vaccOPQRSTUV, vremainder_mask), _mm256_srai_epi32(vaccOPQRSTUV, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+ vaccGHIJKLMN = _mm256_sub_epi32(_mm256_sra_epi32(vaccGHIJKLMN, vshift), _mm256_cmpgt_epi32(vremGHIJKLMN, vremainder_threshold));
+ vaccOPQRSTUV = _mm256_sub_epi32(_mm256_sra_epi32(vaccOPQRSTUV, vshift), _mm256_cmpgt_epi32(vremOPQRSTUV, vremainder_threshold));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);
+
+ vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+ voutGHIJOPQRKLMNSTUV = _mm256_min_epi16(_mm256_max_epi16(voutGHIJOPQRKLMNSTUV, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ do {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ input_x += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ n -= 8 * sizeof(int8_t);
+ } else {
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ n = 0;
+ }
+ } while (n != 0);
+ }
+}
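The _mm256_broadcastsi128_si256(_mm_add_epi32(_mm_broadcastd_epi32(...), ...)) setup at the top of each VADDC ("add a constant") kernel folds the scalar addend *input_y into the precomputed bias once per call, since it contributes the same amount to every output element. A scalar sketch of that setup, with names that merely echo the params->sse2 fields (this helper is illustrative and not an XNNPACK API):

#include <stdint.h>

// Fold the constant addend's contribution into the bias so the inner loop
// needs only one multiply-add per element:
//   acc[i] = folded_bias + x_multiplier * (int32_t) x[i]
static inline int32_t qs8_vaddc_fold_bias(
    int32_t zero_point_product,  // bias derived from the operand zero points
    int32_t y_multiplier,        // fixed-point multiplier of the y operand
    int8_t y)                    // the constant addend, *input_y in the kernel
{
  return zero_point_product + y_multiplier * (int32_t) y;
}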
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c
new file mode 100644
index 000000000..127c635c1
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c
@@ -0,0 +1,84 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8(
+ size_t n,
+ const int8_t* input_x,
+ const int8_t* input_y,
+ int8_t* output,
+ const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+ const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+ const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+ const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+ __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+ _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+ _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+ for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+ input_x += 8;
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ }
+ if XNN_UNLIKELY(n != 0) {
+ {
+ const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+
+ __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+ const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+ vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if (n & (4 * sizeof(int8_t))) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (n & (2 * sizeof(int8_t))) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (n & (1 * sizeof(int8_t))) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ }
+ }
+ }
+}
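The per-element arithmetic that the AVX2 code above vectorizes can be summarized by the following scalar model. It is a hypothetical sketch, not part of XNNPACK: the parameter names mirror the params->sse2 fields, and it assumes arithmetic right shift of negative int32_t values, which the vector kernel obtains explicitly from _mm256_sra_epi32/_mm256_srai_epi32.

#include <stdint.h>

static inline int8_t qs8_vaddc_element(
    int8_t x,
    int32_t zero_point_product,   // bias with the constant addend already folded in
    int32_t x_multiplier,
    int32_t remainder_mask,
    int32_t remainder_threshold,
    uint32_t shift,
    int16_t output_zero_point,
    int16_t output_min,
    int16_t output_max)
{
  // Fixed-point scale-and-accumulate in 32 bits.
  int32_t acc = zero_point_product + (int32_t) x * x_multiplier;

  // Rounding division by 2**shift: keep the discarded low bits, bias them by
  // -1 when acc is negative (acc >> 31), and round up when they exceed the
  // threshold. The vector code adds the 0/-1 mask from _mm256_cmpgt_epi32 by
  // subtracting it.
  const int32_t rem = (acc & remainder_mask) + (acc >> 31);
  int32_t out = (acc >> shift) + (int32_t) (rem > remainder_threshold);

  // Re-center on the output zero point, then clamp to the quantized range.
  out += output_zero_point;
  if (out < output_min) out = output_min;
  if (out > output_max) out = output_max;
  return (int8_t) out;
}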
diff --git a/src/xnnpack/vadd.h b/src/xnnpack/vadd.h
index 919adc8e8..9215127de 100644
--- a/src/xnnpack/vadd.h
+++ b/src/xnnpack/vadd.h
@@ -65,6 +65,11 @@ DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__xop_mul32_
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x32)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32)
+
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__wasmsimd_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__wasmsimd_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vadd_minmax_ukernel__wasmsimd_x24)
@@ -95,6 +100,11 @@ DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__xop_mul32
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x24)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x32)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24)
+DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32)
+
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x8)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x16)
DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION(xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x24)
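Each DECLARE_QS8_VADD_MINMAX_UKERNEL_FUNCTION line presumably expands to an extern prototype with the common QS8 vadd/vaddc ukernel signature seen in the kernel definitions earlier in this diff, roughly (the actual macro likely also carries the library's internal linkage attribute, and the definitions additionally use the restrict XNN_MIN_ELEMENTS(1) and XNN_DISABLE_TSAN qualifiers):

void xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8(
    size_t n,                                 // batch size in int8_t elements
    const int8_t* input_x,                    // first addend
    const int8_t* input_y,                    // second addend (VADDC kernels read only input_y[0])
    int8_t* output,                           // quantized sum
    const union xnn_qs8_add_params* params);  // precomputed quantization parameters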
diff --git a/test/qs8-vadd-minmax.cc b/test/qs8-vadd-minmax.cc
index d3ca1ff85..841e723ba 100644
--- a/test/qs8-vadd-minmax.cc
+++ b/test/qs8-vadd-minmax.cc
@@ -3217,6 +3217,646 @@
#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, batch_div_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, inplace_a) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, inplace_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, inplace_a_and_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X8, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, batch_div_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, inplace_a) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, inplace_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, inplace_a_and_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X16, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, batch_div_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, inplace_a) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, inplace_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, inplace_a_and_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X24, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, batch_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, inplace_a) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, inplace_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, inplace_a_and_b) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace_a(true)
+ .inplace_b(true)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADD_MINMAX__AVX2_MUL32_LD64_X32, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
#if XNN_ARCH_WASMSIMD
TEST(QS8_VADD_MINMAX__WASMSIMD_X8, batch_eq_8) {
VAddMicrokernelTester()
diff --git a/test/qs8-vadd-minmax.yaml b/test/qs8-vadd-minmax.yaml
index 42bec17aa..58898d07f 100644
--- a/test/qs8-vadd-minmax.yaml
+++ b/test/qs8-vadd-minmax.yaml
@@ -22,6 +22,10 @@
- name: xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x16
- name: xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x24
- name: xnn_qs8_vadd_minmax_ukernel__xop_mul32_ld32_x32
+- name: xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x8
+- name: xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x16
+- name: xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x24
+- name: xnn_qs8_vadd_minmax_ukernel__avx2_mul32_ld64_x32
- name: xnn_qs8_vadd_minmax_ukernel__wasmsimd_x8
- name: xnn_qs8_vadd_minmax_ukernel__wasmsimd_x16
- name: xnn_qs8_vadd_minmax_ukernel__wasmsimd_x24
diff --git a/test/qs8-vaddc-minmax.cc b/test/qs8-vaddc-minmax.cc
index 7bb8ddae3..79dce5be9 100644
--- a/test/qs8-vaddc-minmax.cc
+++ b/test/qs8-vaddc-minmax.cc
@@ -2797,6 +2797,562 @@
#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, batch_eq_8) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddCMicrokernelTester()
+ .batch_size(8)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, batch_div_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 16; batch_size < 80; batch_size += 8) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, batch_lt_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 8; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, batch_gt_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 9; batch_size < 16; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, inplace) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace(true)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X8, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 40; batch_size += 7) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, batch_eq_16) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddCMicrokernelTester()
+ .batch_size(16)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, batch_div_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 32; batch_size < 160; batch_size += 16) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, batch_lt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 16; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, batch_gt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 17; batch_size < 32; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, inplace) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace(true)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X16, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 80; batch_size += 15) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, batch_eq_24) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddCMicrokernelTester()
+ .batch_size(24)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, batch_div_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 48; batch_size < 240; batch_size += 24) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, batch_lt_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 24; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, batch_gt_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 25; batch_size < 48; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, inplace) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace(true)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X24, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 120; batch_size += 23) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, batch_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ VAddCMicrokernelTester()
+ .batch_size(32)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, batch_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 64; batch_size < 320; batch_size += 32) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, batch_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size < 32; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, batch_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 33; batch_size < 64; batch_size++) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, inplace) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .inplace(true)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, a_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (int32_t a_zero_point = -128; a_zero_point <= 127; a_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_zero_point(a_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, b_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (int32_t b_zero_point = -128; b_zero_point <= 127; b_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_zero_point(b_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, y_zero_point) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (int32_t y_zero_point = -128; y_zero_point <= 127; y_zero_point += 51) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_zero_point(y_zero_point)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, a_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .a_scale(a_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, b_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (float b_scale = 0.1f; b_scale <= 10.0f; b_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .b_scale(b_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, y_scale) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ for (float y_scale = 0.1f; y_scale <= 10.0f; y_scale *= 3.14f) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .y_scale(y_scale)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmin(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+
+ TEST(QS8_VADDC_MINMAX__AVX2_MUL32_LD64_X32, qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t batch_size = 1; batch_size <= 160; batch_size += 31) {
+ VAddCMicrokernelTester()
+ .batch_size(batch_size)
+ .qmax(128)
+ .Test(xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
#if XNN_ARCH_WASMSIMD
TEST(QS8_VADDC_MINMAX__WASMSIMD_X8, batch_eq_8) {
VAddCMicrokernelTester()
diff --git a/test/qs8-vaddc-minmax.yaml b/test/qs8-vaddc-minmax.yaml
index 223b06878..454d0851f 100644
--- a/test/qs8-vaddc-minmax.yaml
+++ b/test/qs8-vaddc-minmax.yaml
@@ -22,6 +22,10 @@
- name: xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x16
- name: xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x24
- name: xnn_qs8_vaddc_minmax_ukernel__xop_mul32_ld32_x32
+- name: xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8
+- name: xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16
+- name: xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24
+- name: xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32
- name: xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x8
- name: xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x16
- name: xnn_qs8_vaddc_minmax_ukernel__wasmsimd_x24