author    | Marat Dukhan <maratek@google.com> | 2020-09-08 23:57:14 -0700
committer | XNNPACK Team <xnnpack-github-robot@google.com> | 2020-09-08 23:57:52 -0700
commit    | e6dc0b6df1cabbb370650927e810511877813b8a (patch)
tree      | ae0c472b4fc044a7b511b41f0989a3851048d803 /src/qs8-vaddc
parent    | bb9225e24044023b0cc0ae14c8b53ff3f0d94419 (diff)
download  | XNNPACK-e6dc0b6df1cabbb370650927e810511877813b8a.tar.gz
AVX2 versions of QS8 VADD[C] microkernels
PiperOrigin-RevId: 330666026
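For readers skimming the diff below: every kernel variant repeats the same fixed-point arithmetic per lane. Here is a minimal scalar model of one output element. This is not part of the commit; the helper name and signature are made up, and the `params->sse2.*` field semantics are inferred from how the diff uses them (e.g. `remainder_mask == (1 << shift) - 1`).

```c
#include <stdint.h>

// Scalar sketch of one lane of xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x*.
// Illustrative only: NOT XNNPACK API; field meanings inferred from the diff.
static inline int8_t qs8_vaddc_scalar_model(
    int8_t x, int8_t y,                 // per-element input and constant addend
    int32_t x_multiplier, int32_t y_multiplier,
    int32_t zero_point_product,         // precomputed input-zero-point bias
    int32_t remainder_mask, int32_t remainder_threshold,
    uint32_t shift,                     // requantization shift
    int16_t output_zero_point, int16_t output_min, int16_t output_max)
{
  // VADDC: y is a single constant, so y_multiplier * y is folded into the
  // bias once, outside the loops -- exactly what vzero_point_product does.
  const int32_t bias = zero_point_product + y_multiplier * (int32_t) y;
  int32_t acc = bias + x_multiplier * (int32_t) x;

  // Rounding arithmetic shift right, mirroring the vrem/vremainder_threshold
  // idiom in the kernels: add 1 to the quotient when the discarded low bits
  // round it up. (Assumes >> on a negative int32 is an arithmetic shift.)
  const int32_t rem = (acc & remainder_mask) + (acc >> 31);
  acc = (acc >> (int32_t) shift) + (int32_t) (rem > remainder_threshold);

  // Re-center on the output zero point, clamp, and narrow to int8. The
  // vector code saturates to int16 first; with in-range params the final
  // clamp makes the two formulations agree.
  acc += (int32_t) output_zero_point;
  if (acc < (int32_t) output_min) acc = (int32_t) output_min;
  if (acc > (int32_t) output_max) acc = (int32_t) output_max;
  return (int8_t) acc;
}
```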
Diffstat (limited to 'src/qs8-vaddc')
-rw-r--r-- | src/qs8-vaddc/avx2-mul32-ld64.c.in             | 145
-rw-r--r-- | src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c |  96
-rw-r--r-- | src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c | 104
-rw-r--r-- | src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c | 108
-rw-r--r-- | src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c  |  84
5 files changed, 537 insertions, 0 deletions
diff --git a/src/qs8-vaddc/avx2-mul32-ld64.c.in b/src/qs8-vaddc/avx2-mul32-ld64.c.in
new file mode 100644
index 000000000..6b29b59eb
--- /dev/null
+++ b/src/qs8-vaddc/avx2-mul32-ld64.c.in
@@ -0,0 +1,145 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert BATCH_TILE % 8 == 0
+$assert BATCH_TILE >= 8
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x${BATCH_TILE}(
+    size_t n,
+    const int8_t* input_x,
+    const int8_t* input_y,
+    int8_t* output,
+    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+  const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+  const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  $if BATCH_TILE > 8:
+    const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+    const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+    const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+  $else:
+    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+  __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+    _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+    _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+  for (; n >= ${BATCH_TILE} * sizeof(int8_t); n -= ${BATCH_TILE} * sizeof(int8_t)) {
+    const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+    $for N in range(8, BATCH_TILE, 8):
+      const __m256i vx${ABC[N:N+8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + ${N})));
+    input_x += ${BATCH_TILE};
+
+    $for N in range(0, BATCH_TILE, 8):
+      __m256i vacc${ABC[N:N+8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[N:N+8]}, vx_multiplier));
+
+    $for N in range(0, BATCH_TILE, 8):
+      const __m256i vrem${ABC[N:N+8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[N:N+8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[N:N+8]}, 31));
+
+    $for N in range(0, BATCH_TILE, 8):
+      vacc${ABC[N:N+8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[N:N+8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[N:N+8]}, vremainder_threshold));
+
+    $for N in range(0, BATCH_TILE, 16):
+      $if N + 8 < BATCH_TILE:
+        __m256i vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_adds_epi16(_mm256_packs_epi32(vacc${ABC[N:N+8]}, vacc${ABC[N+8:N+16]}), voutput_zero_point);
+      $elif BATCH_TILE > 8:
+        __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
+      $else:
+        __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[N:N+8]}), _mm256_extracti128_si256(vacc${ABC[N:N+8]}, 1)), voutput_zero_point);
+
+    $for N in range(0, BATCH_TILE, 16):
+      $if N + 8 < BATCH_TILE:
+        vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_min_epi16(_mm256_max_epi16(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, voutput_min), voutput_max);
+      $elif BATCH_TILE > 8:
+        vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+      $else:
+        vout${ABC[N:N+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[N:N+8]}, voutput_min), voutput_max);
+
+    $for N in range(0, BATCH_TILE, 16):
+      $if N + 8 < BATCH_TILE:
+        __m128i vout${ABC[N:N+16]} = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}), _mm256_extracti128_si256(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+      $else:
+        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = _mm_packs_epi16(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});
+
+    $if BATCH_TILE >= 16:
+      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
+    $else:
+      _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+    $for N in range(16, BATCH_TILE, 16):
+      $if N + 8 < BATCH_TILE:
+        _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
+      $else:
+        _mm_storel_epi64((__m128i*) (output + ${N}), vout${ABC[N:N+8]}${ABC[N:N+8]});
+    output += ${BATCH_TILE};
+  }
+  if XNN_UNLIKELY(n != 0) {
+    ${"do " if BATCH_TILE > 8 else ""}{
+      const __m256i vx${ABC[0:8]} = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+      $if BATCH_TILE > 8:
+        input_x += 8;
+
+      __m256i vacc${ABC[0:8]} = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx${ABC[0:8]}, vx_multiplier));
+
+      const __m256i vrem${ABC[0:8]} = _mm256_add_epi32(_mm256_and_si256(vacc${ABC[0:8]}, vremainder_mask), _mm256_srai_epi32(vacc${ABC[0:8]}, 31));
+
+      vacc${ABC[0:8]} = _mm256_sub_epi32(_mm256_sra_epi32(vacc${ABC[0:8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[0:8]}, vremainder_threshold));
+
+      $if BATCH_TILE > 8:
+        __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), _mm256_castsi256_si128(voutput_zero_point));
+        vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+      $else:
+        __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), voutput_zero_point);
+        vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);
+      __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
+
+      $if BATCH_TILE > 8:
+        if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+          output += 8;
+          n -= 8 * sizeof(int8_t);
+        } else {
+          if (n & (4 * sizeof(int8_t))) {
+            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+            output += 4;
+          }
+          if (n & (2 * sizeof(int8_t))) {
+            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+            output += 2;
+          }
+          if (n & (1 * sizeof(int8_t))) {
+            *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          }
+          n = 0;
+        }
+      $else:
+        if (n & (4 * sizeof(int8_t))) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(int8_t))) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(int8_t))) {
+          *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+        }
+    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
+  }
+}
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c
new file mode 100644
index 000000000..94cf51a03
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x16.c
@@ -0,0 +1,96 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x16(
+    size_t n,
+    const int8_t* input_x,
+    const int8_t* input_y,
+    int8_t* output,
+    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+  const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+  const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+  const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+  const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+  __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+    _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+    _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
+    const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+    const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+    input_x += 16;
+
+    __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+    __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+
+    const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+    const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+
+    vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+    vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+
+    __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+
+    vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+
+    __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+    output += 16;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+      input_x += 8;
+
+      __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+      const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+      vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+      if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+        output += 8;
+        n -= 8 * sizeof(int8_t);
+      } else {
+        if (n & (4 * sizeof(int8_t))) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(int8_t))) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(int8_t))) {
+          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c
new file mode 100644
index 000000000..729963e8e
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x24.c
@@ -0,0 +1,104 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x24(
+    size_t n,
+    const int8_t* input_x,
+    const int8_t* input_y,
+    int8_t* output,
+    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+  const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+  const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+  const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+  const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+  __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+    _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+    _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+  for (; n >= 24 * sizeof(int8_t); n -= 24 * sizeof(int8_t)) {
+    const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+    const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+    const __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 16)));
+    input_x += 24;
+
+    __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+    __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+    __m256i vaccGHIJKLMN = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxGHIJKLMN, vx_multiplier));
+
+    const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+    const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+    const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm256_srai_epi32(vaccGHIJKLMN, 31));
+
+    vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+    vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+    vaccGHIJKLMN = _mm256_sub_epi32(_mm256_sra_epi32(vaccGHIJKLMN, vshift), _mm256_cmpgt_epi32(vremGHIJKLMN, vremainder_threshold));
+
+    __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+    __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extracti128_si256(vaccGHIJKLMN, 1)), _mm256_castsi256_si128(voutput_zero_point));
+
+    vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+    voutGHIJKLMN = _mm_min_epi16(_mm_max_epi16(voutGHIJKLMN, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+
+    __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+    __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+    _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+    output += 24;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+      input_x += 8;
+
+      __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+      const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+      vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+      if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+        output += 8;
+        n -= 8 * sizeof(int8_t);
+      } else {
+        if (n & (4 * sizeof(int8_t))) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(int8_t))) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(int8_t))) {
+          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c
new file mode 100644
index 000000000..b2dab8f1f
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x32.c
@@ -0,0 +1,108 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x32(
+    size_t n,
+    const int8_t* input_x,
+    const int8_t* input_y,
+    int8_t* output,
+    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+  const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+  const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m256i voutput_zero_point = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_zero_point));
+  const __m256i voutput_min = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_min));
+  const __m256i voutput_max = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.output_max));
+
+  __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+    _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+    _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
+    const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+    const __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 8)));
+    const __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 16)));
+    const __m256i vxOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (input_x + 24)));
+    input_x += 32;
+
+    __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+    __m256i vacc89ABCDEF = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx89ABCDEF, vx_multiplier));
+    __m256i vaccGHIJKLMN = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxGHIJKLMN, vx_multiplier));
+    __m256i vaccOPQRSTUV = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vxOPQRSTUV, vx_multiplier));
+
+    const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+    const __m256i vrem89ABCDEF = _mm256_add_epi32(_mm256_and_si256(vacc89ABCDEF, vremainder_mask), _mm256_srai_epi32(vacc89ABCDEF, 31));
+    const __m256i vremGHIJKLMN = _mm256_add_epi32(_mm256_and_si256(vaccGHIJKLMN, vremainder_mask), _mm256_srai_epi32(vaccGHIJKLMN, 31));
+    const __m256i vremOPQRSTUV = _mm256_add_epi32(_mm256_and_si256(vaccOPQRSTUV, vremainder_mask), _mm256_srai_epi32(vaccOPQRSTUV, 31));
+
+    vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+    vacc89ABCDEF = _mm256_sub_epi32(_mm256_sra_epi32(vacc89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+    vaccGHIJKLMN = _mm256_sub_epi32(_mm256_sra_epi32(vaccGHIJKLMN, vshift), _mm256_cmpgt_epi32(vremGHIJKLMN, vremainder_threshold));
+    vaccOPQRSTUV = _mm256_sub_epi32(_mm256_sra_epi32(vaccOPQRSTUV, vshift), _mm256_cmpgt_epi32(vremOPQRSTUV, vremainder_threshold));
+
+    __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+    __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);
+
+    vout012389AB4567CDEF = _mm256_min_epi16(_mm256_max_epi16(vout012389AB4567CDEF, voutput_min), voutput_max);
+    voutGHIJOPQRKLMNSTUV = _mm256_min_epi16(_mm256_max_epi16(voutGHIJOPQRKLMNSTUV, voutput_min), voutput_max);
+
+    __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+    __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+    _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+    output += 32;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    do {
+      const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+      input_x += 8;
+
+      __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+      const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+      vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), _mm256_castsi256_si128(voutput_zero_point));
+      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, _mm256_castsi256_si128(voutput_min)), _mm256_castsi256_si128(voutput_max));
+      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+      if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
+        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+        output += 8;
+        n -= 8 * sizeof(int8_t);
+      } else {
+        if (n & (4 * sizeof(int8_t))) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (n & (2 * sizeof(int8_t))) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (n & (1 * sizeof(int8_t))) {
+          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+        }
+        n = 0;
+      }
+    } while (n != 0);
+  }
+}
diff --git a/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c
new file mode 100644
index 000000000..127c635c1
--- /dev/null
+++ b/src/qs8-vaddc/gen/minmax-avx2-mul32-ld64-x8.c
@@ -0,0 +1,84 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-vaddc/avx2-mul32-ld64.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/vadd.h>
+
+
+void xnn_qs8_vaddc_minmax_ukernel__avx2_mul32_ld64_x8(
+    size_t n,
+    const int8_t* input_x,
+    const int8_t* input_y,
+    int8_t* output,
+    const union xnn_qs8_add_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  const __m256i vx_multiplier = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.x_multiplier));
+  const __m256i vremainder_mask = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_mask));
+  const __m256i vremainder_threshold = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) params->sse2.remainder_threshold));
+  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
+  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+
+  __m256i vzero_point_product = _mm256_broadcastsi128_si256(_mm_add_epi32(
+    _mm_broadcastd_epi32(_mm_cvtsi32_si128(params->sse2.y_multiplier[0] * (int32_t) *input_y)),
+    _mm_load_si128((const __m128i*) params->sse2.zero_point_product)));
+  for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
+    const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+    input_x += 8;
+
+    __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+    const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+    vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+    vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+    __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+    _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+    output += 8;
+  }
+  if XNN_UNLIKELY(n != 0) {
+    {
+      const __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) input_x));
+
+      __m256i vacc01234567 = _mm256_add_epi32(vzero_point_product, _mm256_mullo_epi32(vx01234567, vx_multiplier));
+
+      const __m256i vrem01234567 = _mm256_add_epi32(_mm256_and_si256(vacc01234567, vremainder_mask), _mm256_srai_epi32(vacc01234567, 31));
+
+      vacc01234567 = _mm256_sub_epi32(_mm256_sra_epi32(vacc01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+      if (n & (4 * sizeof(int8_t))) {
+        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+        vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+        output += 4;
+      }
+      if (n & (2 * sizeof(int8_t))) {
+        *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+        vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+        output += 2;
+      }
+      if (n & (1 * sizeof(int8_t))) {
+        *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+      }
+    }
+  }
+}
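One non-obvious detail in the kernels above is the scrambled intermediate names such as `vout012389AB4567CDEF`: `_mm256_packs_epi32` narrows within each 128-bit lane, so the elements of the two input vectors come out interleaved by lane, and the trailing `_mm_shuffle_epi32(..., _MM_SHUFFLE(3, 1, 2, 0))` restores element order. The following standalone demo of that lane fix-up is my own test file, not part of the commit; build with `-mavx2` and it prints 0 through 15 in order.

```c
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  // 16 int32 elements 0..15 split across two 256-bit registers.
  const __m256i lo = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
  const __m256i hi = _mm256_setr_epi32(8, 9, 10, 11, 12, 13, 14, 15);

  // packs_epi32 packs per 128-bit lane, yielding the int16 order
  // 0 1 2 3 | 8 9 10 11 | 4 5 6 7 | 12 13 14 15 -- the "012389AB4567CDEF"
  // order recorded in the kernels' variable names.
  const __m256i packed16 = _mm256_packs_epi32(lo, hi);

  // Narrow to int8 in one 128-bit register: its dwords now hold the byte
  // groups 0123, 89AB, 4567, CDEF ...
  const __m128i packed8 = _mm_packs_epi16(_mm256_castsi256_si128(packed16),
                                          _mm256_extracti128_si256(packed16, 1));
  // ... so swapping the middle two dwords restores 0123 4567 89AB CDEF.
  const __m128i ordered = _mm_shuffle_epi32(packed8, _MM_SHUFFLE(3, 1, 2, 0));

  int8_t out[16];
  _mm_storeu_si128((__m128i*) out, ordered);
  for (int i = 0; i < 16; i++) {
    printf("%d ", (int) out[i]);  // prints: 0 1 2 ... 15
  }
  printf("\n");
  return 0;
}
```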