author    Frank Barchard <fbarchard@google.com>  2024-04-30 13:34:58 -0700
committer XNNPACK Team <xnnpack-github-robot@google.com>  2024-04-30 13:36:37 -0700
commit    0e3d0c2a03812fcfe8a557bf946bb404397dec31 (patch)
tree      7590bda6263a1ed7690ac3efa97ce4d1ba745a40
parent    0d7cc5ee1757a9080beedc797c2a6bca514e4f8f (diff)
Softmax kernels for AVX/AVX512: generate smaller unrolled variants

The smallest existing kernels were unrolled to 8 vectors. Add 4-vector versions
of the F16 and F32, AVX2 and AVX512, raddstoreexpminusmax and raddexpminusmax
microkernels.

PiperOrigin-RevId: 629513853
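For orientation (not part of the commit): a minimal scalar C sketch of what a BATCH_TILE=32, ACCUMULATORS=2 reduction variant computes. The main loop consumes 4 vectors of 8 floats per iteration and feeds them into 2 independent accumulators that are combined before the remainder loop. The function name and the plain expf() call are illustrative only; the real kernels use the AVX2/AVX512 polynomial approximation shown in the diff below and handle the tail 8-wide with masked loads.

#include <math.h>
#include <stddef.h>

// Illustrative scalar model of xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2:
// sum of exp(x - max) with a 32-element (4x8) main loop and 2 accumulators.
static float raddexpminusmax_u32_acc2_ref(size_t n, const float* x, float x_max) {
  float acc0 = 0.0f, acc1 = 0.0f;           // two independent accumulators
  size_t i = 0;
  for (; i + 32 <= n; i += 32) {            // main loop: 4 "vectors" of 8 elements
    for (size_t j = 0; j < 8; j++) {
      acc0 += expf(x[i + j +  0] - x_max);  // vector 0 -> accumulator 0
      acc1 += expf(x[i + j +  8] - x_max);  // vector 1 -> accumulator 1
      acc0 += expf(x[i + j + 16] - x_max);  // vector 2 -> accumulator 0
      acc1 += expf(x[i + j + 24] - x_max);  // vector 3 -> accumulator 1
    }
  }
  float acc = acc0 + acc1;                  // combine accumulators
  for (; i < n; i++) {                      // remainder, element by element
    acc += expf(x[i] - x_max);
  }
  return acc;
}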
-rw-r--r--  bench/f16-raddstoreexpminusmax.cc | 14
-rw-r--r--  bench/f32-raddexpminusmax.cc | 122
-rw-r--r--  bench/f32-raddstoreexpminusmax.cc | 44
-rw-r--r--  cmake/microkernels.cmake | 20
-rw-r--r--  microkernels.bzl | 20
-rwxr-xr-x  scripts/generate-f16-raddstoreexpminusmax.sh | 2
-rwxr-xr-x  scripts/generate-f32-raddexpminusmax.sh | 6
-rwxr-xr-x  scripts/generate-f32-raddextexp.sh | 6
-rwxr-xr-x  scripts/generate-f32-raddstoreexpminusmax.sh | 6
-rw-r--r--  src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16-acc2.c | 158
-rw-r--r--  src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16.c | 156
-rw-r--r--  src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc2.c | 239
-rw-r--r--  src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc4.c | 243
-rw-r--r--  src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32.c | 236
-rw-r--r--  src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc2.c | 186
-rw-r--r--  src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc4.c | 190
-rw-r--r--  src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64.c | 183
-rw-r--r--  src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc2.c | 264
-rw-r--r--  src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc4.c | 284
-rw-r--r--  src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32.c | 249
-rw-r--r--  src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc2.c | 212
-rw-r--r--  src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc4.c | 228
-rw-r--r--  src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64.c | 201
-rw-r--r--  src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc2.c | 204
-rw-r--r--  src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc4.c | 208
-rw-r--r--  src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32.c | 202
-rw-r--r--  src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc2.c | 160
-rw-r--r--  src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc4.c | 164
-rw-r--r--  src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64.c | 158
-rw-r--r--  src/xnnpack/raddexpminusmax.h | 6
-rw-r--r--  src/xnnpack/raddstoreexpminusmax.h | 8
-rw-r--r--  test/f16-raddstoreexpminusmax.cc | 74
-rw-r--r--  test/f16-raddstoreexpminusmax.yaml | 4
-rw-r--r--  test/f32-raddexpminusmax.cc | 222
-rw-r--r--  test/f32-raddexpminusmax.yaml | 7
-rw-r--r--  test/f32-raddstoreexpminusmax.cc | 222
-rw-r--r--  test/f32-raddstoreexpminusmax.yaml | 19
-rw-r--r--  test/f32-vscaleexpminusmax.yaml | 1
38 files changed, 4880 insertions(+), 48 deletions(-)
diff --git a/bench/f16-raddstoreexpminusmax.cc b/bench/f16-raddstoreexpminusmax.cc
index c0c7ca153..fff06d0aa 100644
--- a/bench/f16-raddstoreexpminusmax.cc
+++ b/bench/f16-raddstoreexpminusmax.cc
@@ -233,6 +233,20 @@ static void f16_raddstoreexpminusmax(
#endif // XNN_ENABLE_ARM_FP16_VECTOR && (XNN_ARCH_ARM || XNN_ARCH_ARM64)
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ BENCHMARK_CAPTURE(f16_raddstoreexpminusmax, avx2_rr1_p2_u16,
+ xnn_f16_rmax_ukernel__f16c_u32,
+ xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16,
+ xnn_init_f16_expminus_avx2_rr1_p2_params,
+ benchmark::utils::CheckAVX2)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<uint16_t, uint16_t>)
+ ->UseRealTime();
+ BENCHMARK_CAPTURE(f16_raddstoreexpminusmax, avx2_rr1_p2_u16_acc2,
+ xnn_f16_rmax_ukernel__f16c_u32,
+ xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2,
+ xnn_init_f16_expminus_avx2_rr1_p2_params,
+ benchmark::utils::CheckAVX2)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<uint16_t, uint16_t>)
+ ->UseRealTime();
BENCHMARK_CAPTURE(f16_raddstoreexpminusmax, avx2_rr1_p2_u32,
xnn_f16_rmax_ukernel__f16c_u32,
xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u32,
diff --git a/bench/f32-raddexpminusmax.cc b/bench/f32-raddexpminusmax.cc
index abeaa1aac..86ddcf0be 100644
--- a/bench/f32-raddexpminusmax.cc
+++ b/bench/f32-raddexpminusmax.cc
@@ -82,57 +82,18 @@ static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
}
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u64,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u64_acc2,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc2,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u64_acc4,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc4,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
-
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u72,
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx512f_p5_scalef_u64,
xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u72,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u72_acc3,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u72_acc3,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
-
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u80,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u80,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u80_acc2,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u80_acc2,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u80_acc5,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u80_acc5,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
-
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96_acc2,
- xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc2,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96_acc3,
+ xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64,
+ benchmark::utils::CheckAVX512F)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx512f_p5_scalef_u64_acc2,
xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc3,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
- BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96_acc6,
+ xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2,
+ benchmark::utils::CheckAVX512F)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx512f_p5_scalef_u64_acc4,
xnn_f32_rmax_ukernel__avx_u32_acc4,
- xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc6,
- benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4,
+ benchmark::utils::CheckAVX512F)->Apply(CharacteristicArguments)->UseRealTime();
BENCHMARK_CAPTURE(f32_raddexpminusmax, avx512f_p5_scalef_u128,
xnn_f32_rmax_ukernel__avx_u32_acc4,
@@ -185,6 +146,71 @@ static void CharacteristicArguments(benchmark::internal::Benchmark* b) {
xnn_f32_rmax_ukernel__avx_u32_acc4,
xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u192_acc6,
benchmark::utils::CheckAVX512F)->Apply(CharacteristicArguments)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u32,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u32_acc2,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u32_acc4,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u64,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u64_acc2,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc2,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u64_acc4,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc4,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u72,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u72,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u72_acc3,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u72_acc3,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u80,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u80,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u80_acc2,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u80_acc2,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u80_acc5,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u80_acc5,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96_acc2,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc2,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96_acc3,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc3,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddexpminusmax, avx2_p5_u96_acc6,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc6,
+ benchmark::utils::CheckAVX2)->Apply(CharacteristicArguments)->UseRealTime();
#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
#ifndef XNNPACK_BENCHMARK_NO_MAIN
diff --git a/bench/f32-raddstoreexpminusmax.cc b/bench/f32-raddstoreexpminusmax.cc
index 02a26813c..2951def4a 100644
--- a/bench/f32-raddstoreexpminusmax.cc
+++ b/bench/f32-raddstoreexpminusmax.cc
@@ -440,6 +440,28 @@ static void f32_raddstoreexpminusmax(
#endif // XNN_ENABLE_RISCV_VECTOR && XNN_ARCH_RISCV
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx512f_rr1_p5_scalef_u64,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64,
+ xnn_init_f32_expminus_avx512_rr1_p5_params,
+ benchmark::utils::CheckAVX512F)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
+ ->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx512f_rr1_p5_scalef_u64_acc2,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2,
+ xnn_init_f32_expminus_avx512_rr1_p5_params,
+ benchmark::utils::CheckAVX512F)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
+ ->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx512f_rr1_p5_scalef_u64_acc4,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4,
+ xnn_init_f32_expminus_avx512_rr1_p5_params,
+ benchmark::utils::CheckAVX512F)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
+ ->UseRealTime();
+
BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx512f_rr1_p5_scalef_u128,
xnn_f32_rmax_ukernel__avx_u32_acc4,
xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u128,
@@ -528,6 +550,28 @@ static void f32_raddstoreexpminusmax(
->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx2_rr1_p5_u32,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32,
+ xnn_init_f32_expminus_avx2_rr1_p5_params,
+ benchmark::utils::CheckAVX2)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
+ ->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx2_rr1_p5_u32_acc2,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2,
+ xnn_init_f32_expminus_avx2_rr1_p5_params,
+ benchmark::utils::CheckAVX2)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
+ ->UseRealTime();
+ BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx2_rr1_p5_u32_acc4,
+ xnn_f32_rmax_ukernel__avx_u32_acc4,
+ xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4,
+ xnn_init_f32_expminus_avx2_rr1_p5_params,
+ benchmark::utils::CheckAVX2)
+ ->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
+ ->UseRealTime();
+
BENCHMARK_CAPTURE(f32_raddstoreexpminusmax, avx2_rr1_p5_u64,
xnn_f32_rmax_ukernel__avx_u32_acc4,
xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u64,
diff --git a/cmake/microkernels.cmake b/cmake/microkernels.cmake
index 8cc966f3f..0ddfa1d42 100644
--- a/cmake/microkernels.cmake
+++ b/cmake/microkernels.cmake
@@ -603,6 +603,8 @@ SET(ALL_AVX2_MICROKERNEL_SRCS
src/f16-igemm/gen/f16-igemm-7x8-minmax-avx2-broadcast.c
src/f16-pavgpool/f16-pavgpool-9p8x-minmax-avx2-c8.c
src/f16-pavgpool/f16-pavgpool-9x-minmax-avx2-c8.c
+ src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16-acc2.c
+ src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16.c
src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32-acc2.c
src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32-acc4.c
src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32.c
@@ -698,6 +700,9 @@ SET(ALL_AVX2_MICROKERNEL_SRCS
src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-u32.c
src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-u48.c
src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-u64.c
+ src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc2.c
+ src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc4.c
+ src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64-acc2.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64-acc4.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64.c
@@ -710,6 +715,9 @@ SET(ALL_AVX2_MICROKERNEL_SRCS
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u96-acc3.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u96-acc6.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u96.c
+ src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc2.c
+ src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc4.c
+ src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32.c
src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64-acc2.c
src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64-acc4.c
src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64.c
@@ -722,6 +730,9 @@ SET(ALL_AVX2_MICROKERNEL_SRCS
src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u96-acc3.c
src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u96-acc6.c
src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u96.c
+ src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc2.c
+ src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc4.c
+ src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32.c
src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64-acc2.c
src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64-acc4.c
src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64.c
@@ -1257,6 +1268,9 @@ SET(ALL_AVX512F_MICROKERNEL_SRCS
src/f32-igemm/gen/f32-igemm-8x16-minmax-avx512f-broadcast.c
src/f32-prelu/gen/f32-prelu-avx512f-2x16.c
src/f32-prelu/gen/f32-prelu-avx512f-2x32.c
+ src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc2.c
+ src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc4.c
+ src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128-acc2.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128-acc4.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128.c
@@ -1269,6 +1283,9 @@ SET(ALL_AVX512F_MICROKERNEL_SRCS
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u192-acc3.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u192-acc6.c
src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u192.c
+ src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc2.c
+ src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc4.c
+ src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64.c
src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128-acc2.c
src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128-acc4.c
src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128.c
@@ -1281,6 +1298,9 @@ SET(ALL_AVX512F_MICROKERNEL_SRCS
src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u192-acc3.c
src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u192-acc6.c
src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u192.c
+ src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc2.c
+ src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc4.c
+ src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64.c
src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128-acc2.c
src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128-acc4.c
src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128.c
diff --git a/microkernels.bzl b/microkernels.bzl
index 428bb79b5..f938c7524 100644
--- a/microkernels.bzl
+++ b/microkernels.bzl
@@ -601,6 +601,8 @@ ALL_AVX2_MICROKERNEL_SRCS = [
"src/f16-igemm/gen/f16-igemm-7x8-minmax-avx2-broadcast.c",
"src/f16-pavgpool/f16-pavgpool-9p8x-minmax-avx2-c8.c",
"src/f16-pavgpool/f16-pavgpool-9x-minmax-avx2-c8.c",
+ "src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16-acc2.c",
+ "src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16.c",
"src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32-acc2.c",
"src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32-acc4.c",
"src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32.c",
@@ -696,6 +698,9 @@ ALL_AVX2_MICROKERNEL_SRCS = [
"src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-u32.c",
"src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-u48.c",
"src/f32-qu8-vcvt/gen/f32-qu8-vcvt-avx2-u64.c",
+ "src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc2.c",
+ "src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc4.c",
+ "src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64-acc2.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64-acc4.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64.c",
@@ -708,6 +713,9 @@ ALL_AVX2_MICROKERNEL_SRCS = [
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u96-acc3.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u96-acc6.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u96.c",
+ "src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc2.c",
+ "src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc4.c",
+ "src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64-acc2.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64-acc4.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64.c",
@@ -720,6 +728,9 @@ ALL_AVX2_MICROKERNEL_SRCS = [
"src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u96-acc3.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u96-acc6.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u96.c",
+ "src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc2.c",
+ "src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc4.c",
+ "src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32.c",
"src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64-acc2.c",
"src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64-acc4.c",
"src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64.c",
@@ -1257,6 +1268,9 @@ ALL_AVX512F_MICROKERNEL_SRCS = [
"src/f32-igemm/gen/f32-igemm-8x16-minmax-avx512f-broadcast.c",
"src/f32-prelu/gen/f32-prelu-avx512f-2x16.c",
"src/f32-prelu/gen/f32-prelu-avx512f-2x32.c",
+ "src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc2.c",
+ "src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc4.c",
+ "src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128-acc2.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128-acc4.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128.c",
@@ -1269,6 +1283,9 @@ ALL_AVX512F_MICROKERNEL_SRCS = [
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u192-acc3.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u192-acc6.c",
"src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u192.c",
+ "src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc2.c",
+ "src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc4.c",
+ "src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128-acc2.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128-acc4.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128.c",
@@ -1281,6 +1298,9 @@ ALL_AVX512F_MICROKERNEL_SRCS = [
"src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u192-acc3.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u192-acc6.c",
"src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u192.c",
+ "src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc2.c",
+ "src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc4.c",
+ "src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64.c",
"src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128-acc2.c",
"src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128-acc4.c",
"src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128.c",
diff --git a/scripts/generate-f16-raddstoreexpminusmax.sh b/scripts/generate-f16-raddstoreexpminusmax.sh
index bccf4ae80..788f7ae08 100755
--- a/scripts/generate-f16-raddstoreexpminusmax.sh
+++ b/scripts/generate-f16-raddstoreexpminusmax.sh
@@ -28,6 +28,8 @@ tools/xngen src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in -D BATCH_TILE
tools/xngen src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in -D BATCH_TILE=96 -D ACCUMULATORS=6 -o src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-neonfp16arith-rr2-p2-u96-acc6.c &
# x86 AVX2
+tools/xngen src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in -D BATCH_TILE=16 -D ACCUMULATORS=1 -o src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16.c &
+tools/xngen src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in -D BATCH_TILE=16 -D ACCUMULATORS=2 -o src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16-acc2.c &
tools/xngen src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in -D BATCH_TILE=32 -D ACCUMULATORS=1 -o src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32.c &
tools/xngen src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in -D BATCH_TILE=32 -D ACCUMULATORS=2 -o src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32-acc2.c &
tools/xngen src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in -D BATCH_TILE=32 -D ACCUMULATORS=4 -o src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u32-acc4.c &
diff --git a/scripts/generate-f32-raddexpminusmax.sh b/scripts/generate-f32-raddexpminusmax.sh
index 611b8923b..8301bf49f 100755
--- a/scripts/generate-f32-raddexpminusmax.sh
+++ b/scripts/generate-f32-raddexpminusmax.sh
@@ -5,6 +5,9 @@
# LICENSE file in the root directory of this source tree.
################################### x86 AVX2 ##################################
+tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=1 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32.c &
+tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=2 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc2.c &
+tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=4 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc4.c &
tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=1 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64.c &
tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=2 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64-acc2.c &
tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=4 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u64-acc4.c &
@@ -19,6 +22,9 @@ tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=96 -D ACCUMULATOR
tools/xngen src/f32-raddexpminusmax/avx2-p5.c.in -D BATCH_TILE=96 -D ACCUMULATORS=6 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u96-acc6.c &
################################# x86 AVX512F #################################
+tools/xngen src/f32-raddexpminusmax/avx512f-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=1 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64.c &
+tools/xngen src/f32-raddexpminusmax/avx512f-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=2 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc2.c &
+tools/xngen src/f32-raddexpminusmax/avx512f-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=4 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc4.c &
tools/xngen src/f32-raddexpminusmax/avx512f-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=1 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128.c &
tools/xngen src/f32-raddexpminusmax/avx512f-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=2 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128-acc2.c &
tools/xngen src/f32-raddexpminusmax/avx512f-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=4 -o src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u128-acc4.c &
diff --git a/scripts/generate-f32-raddextexp.sh b/scripts/generate-f32-raddextexp.sh
index b54637999..2f272accc 100755
--- a/scripts/generate-f32-raddextexp.sh
+++ b/scripts/generate-f32-raddextexp.sh
@@ -5,6 +5,9 @@
# LICENSE file in the root directory of this source tree.
################################### x86 AVX2 ##################################
+tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=1 -o src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32.c &
+tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=2 -o src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc2.c &
+tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=4 -o src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc4.c &
tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=1 -o src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64.c &
tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=2 -o src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64-acc2.c &
tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=4 -o src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u64-acc4.c &
@@ -19,6 +22,9 @@ tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=96 -D ACCUMULATORS=3 -
tools/xngen src/f32-raddextexp/avx2-p5.c.in -D BATCH_TILE=96 -D ACCUMULATORS=6 -o src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u96-acc6.c &
################################# x86 AVX512F #################################
+tools/xngen src/f32-raddextexp/avx512f-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=1 -o src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64.c &
+tools/xngen src/f32-raddextexp/avx512f-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=2 -o src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc2.c &
+tools/xngen src/f32-raddextexp/avx512f-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=4 -o src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc4.c &
tools/xngen src/f32-raddextexp/avx512f-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=1 -o src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128.c &
tools/xngen src/f32-raddextexp/avx512f-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=2 -o src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128-acc2.c &
tools/xngen src/f32-raddextexp/avx512f-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=4 -o src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u128-acc4.c &
diff --git a/scripts/generate-f32-raddstoreexpminusmax.sh b/scripts/generate-f32-raddstoreexpminusmax.sh
index d7fd5dfff..37e60457d 100755
--- a/scripts/generate-f32-raddstoreexpminusmax.sh
+++ b/scripts/generate-f32-raddstoreexpminusmax.sh
@@ -76,6 +76,9 @@ tools/xngen src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in -D BATCH_TILE=20 -D AC
tools/xngen src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in -D BATCH_TILE=20 -D ACCUMULATORS=5 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-u20-acc5.c &
################################### x86 AVX2 ##################################
+tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=1 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32.c &
+tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=2 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc2.c &
+tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=32 -D ACCUMULATORS=4 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc4.c &
tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=1 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64.c &
tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=2 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64-acc2.c &
tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=64 -D ACCUMULATORS=4 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u64-acc4.c &
@@ -90,6 +93,9 @@ tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=96 -D AC
tools/xngen src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in -D BATCH_TILE=96 -D ACCUMULATORS=6 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u96-acc6.c &
################################# x86 AVX512F #################################
+tools/xngen src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=1 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64.c &
+tools/xngen src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=2 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc2.c &
+tools/xngen src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in -D BATCH_TILE=64 -D ACCUMULATORS=4 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc4.c &
tools/xngen src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=1 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128.c &
tools/xngen src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=2 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128-acc2.c &
tools/xngen src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in -D BATCH_TILE=128 -D ACCUMULATORS=4 -o src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u128-acc4.c &
diff --git a/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16-acc2.c b/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16-acc2.c
new file mode 100644
index 000000000..3c0706a21
--- /dev/null
+++ b/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16-acc2.c
@@ -0,0 +1,158 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
+// Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2(
+ size_t batch,
+ const void* input,
+ const void* max,
+ void* output,
+ void* sum,
+ const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+ assert(batch != 0);
+ assert(batch % sizeof(uint16_t) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
+ const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
+ const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
+ const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
+ const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
+ const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
+ const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
+
+ const uint16_t* i = (const uint16_t*) input;
+ uint16_t* o = (uint16_t*) output;
+ __m256 vacc0 = _mm256_setzero_ps();
+ __m256 vacc1 = _mm256_setzero_ps();
+ for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
+ const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
+ const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
+ i += 16;
+
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
+
+ const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
+ const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
+
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+
+ _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
+ _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
+ o += 16;
+
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc1 = _mm256_add_ps(vacc1, vf1);
+ }
+ vacc0 = _mm256_add_ps(vacc0, vacc1);
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
+ const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
+ i += 8;
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
+ o += 8;
+
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(uint16_t));
+ assert(batch <= 7 * sizeof(uint16_t));
+
+ const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ __m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
+ __m128 vf_lo = _mm256_castps256_ps128(vf);
+ if (batch & (4 * sizeof(uint16_t))) {
+ _mm_storel_epi64((__m128i*) o, vh);
+ vh = _mm_unpackhi_epi64(vh, vh);
+ vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
+ vf_lo = _mm256_extractf128_ps(vf, 1);
+ o += 4;
+ }
+ if (batch & (2 * sizeof(uint16_t))) {
+ _mm_storeu_si32(o, vh);
+ vh = _mm_srli_epi64(vh, 32);
+ vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
+ vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
+ o += 2;
+ }
+ if (batch & (1 * sizeof(uint16_t))) {
+ *o = (uint16_t) _mm_extract_epi16(vh, 0);
+ vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
+ }
+ }
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ *((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
+ _mm256_zeroupper();
+}
diff --git a/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16.c b/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16.c
new file mode 100644
index 000000000..1e72df51b
--- /dev/null
+++ b/src/f16-raddstoreexpminusmax/gen/f16-raddstoreexpminusmax-avx2-rr1-p2-u16.c
@@ -0,0 +1,156 @@
+// Auto-generated file. Do not edit!
+// Template: src/f16-raddstoreexpminusmax/avx2-rr1-p2.c.in
+// Generator: tools/xngen
+//
+// Copyright 2022 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16(
+ size_t batch,
+ const void* input,
+ const void* max,
+ void* output,
+ void* sum,
+ const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
+{
+ assert(batch != 0);
+ assert(batch % sizeof(uint16_t) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m256 vi_max = _mm256_cvtph_ps(_mm_set1_epi16((short) *((const uint16_t*) max)));
+ const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e);
+ const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias);
+ const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2);
+ const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2);
+ const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1);
+ const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff);
+
+ const uint16_t* i = (const uint16_t*) input;
+ uint16_t* o = (uint16_t*) output;
+ __m256 vacc0 = _mm256_setzero_ps();
+ for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
+ const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
+ const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
+ i += 16;
+
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
+
+ const __m256 vp0 = _mm256_fmadd_ps(vc2, vt0, vc1);
+ const __m256 vp1 = _mm256_fmadd_ps(vc2, vt1, vc1);
+
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+
+ _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf0, _MM_FROUND_TO_NEAREST_INT));
+ _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vf1, _MM_FROUND_TO_NEAREST_INT));
+ o += 16;
+
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc0 = _mm256_add_ps(vacc0, vf1);
+ }
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
+ const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
+ i += 8;
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT));
+ o += 8;
+
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(uint16_t));
+ assert(batch <= 7 * sizeof(uint16_t));
+
+ const __m256 vi = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1);
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ __m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_TO_NEAREST_INT);
+ __m128 vf_lo = _mm256_castps256_ps128(vf);
+ if (batch & (4 * sizeof(uint16_t))) {
+ _mm_storel_epi64((__m128i*) o, vh);
+ vh = _mm_unpackhi_epi64(vh, vh);
+ vacc_lo = _mm_add_ps(vacc_lo, vf_lo);
+ vf_lo = _mm256_extractf128_ps(vf, 1);
+ o += 4;
+ }
+ if (batch & (2 * sizeof(uint16_t))) {
+ _mm_storeu_si32(o, vh);
+ vh = _mm_srli_epi64(vh, 32);
+ vacc_lo = _mm_blend_ps(_mm_add_ps(vacc_lo, vf_lo), vacc_lo, 0xC);
+ vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
+ o += 2;
+ }
+ if (batch & (1 * sizeof(uint16_t))) {
+ *o = (uint16_t) _mm_extract_epi16(vh, 0);
+ vacc_lo = _mm_add_ss(vacc_lo, vf_lo);
+ }
+ }
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ *((uint16_t*) sum) = (uint16_t) _mm_extract_epi16(_mm_cvtps_ph(vacc_lo, _MM_FROUND_TO_NEAREST_INT), 0);
+ _mm256_zeroupper();
+}
diff --git a/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc2.c b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc2.c
new file mode 100644
index 000000000..d9cb62a35
--- /dev/null
+++ b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc2.c
@@ -0,0 +1,239 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddexpminusmax/avx2-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddexpminusmax.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2(
+ size_t batch,
+ const float* input,
+ float* sum,
+ float max)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+ // The smallest x for which expf(x) is normalized.
+ const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
+ const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+ const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
+ const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
+
+ const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
+ const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
+ const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
+ const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
+ const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m256 vi_max = _mm256_set1_ps(max);
+
+ __m256 vacc0 = _mm256_setzero_ps();
+ __m256 vacc1 = _mm256_setzero_ps();
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ // Load 32 (4x8) inputs at a time.
+ const __m256 vi0 = _mm256_loadu_ps(input);
+ const __m256 vi1 = _mm256_loadu_ps(input + 8);
+ const __m256 vi2 = _mm256_loadu_ps(input + 16);
+ const __m256 vi3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+ const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
+ const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+ __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
+ __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+ vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+ vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+ vt2 = _mm256_mul_ps(vt2, vs2);
+ vt3 = _mm256_mul_ps(vt3, vs3);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+ __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
+ __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+ vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
+ vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
+
+ // Accumulate computed exponents.
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc1 = _mm256_add_ps(vacc1, vf1);
+ vacc0 = _mm256_add_ps(vacc0, vf2);
+ vacc1 = _mm256_add_ps(vacc1, vf3);
+ }
+ // Add up all accumulators to vacc0
+ vacc0 = _mm256_add_ps(vacc0, vacc1);
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ // Load 8 inputs at a time.
+ const __m256 vi = _mm256_loadu_ps(input);
+ input += 8;
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ // Accumulate computed exponents.
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
+
+ // Load up to 7 inputs at a time.
+ const __m256 vi = _mm256_maskload_ps(input, vmask);
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
+ vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
+ }
+ // Reduce 8 batch in the SIMD register
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ _mm_store_ss(sum, vacc_lo);
+ _mm256_zeroupper();
+}
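Aside (not part of the diff): a scalar sketch of the exp(x) scheme used by the generated kernel above, for x in [-87.33642, 0] after the max subtraction. It shows the magic-bias rounding, the 23-bit shift that rebuilds s = 2^n, the two-step Cody-Waite reduction, and the degree-5 polynomial, using the same constants as the kernel. Assumes round-to-nearest IEEE-754 binary32; the denormal-cutoff flush to +0.0f and the FMA fusing of the real kernel are omitted for brevity.

#include <stdint.h>
#include <string.h>

// Illustrative scalar version of the exp(x) approximation in the AVX2 p5 kernels.
static float exp_p5_sketch(float x) {
  const float magic_bias = 0x1.8000FEp23f;    // 1.5*2^23 + 127: rounds n and pre-biases the exponent
  const float log2e = 0x1.715476p+0f;
  const float minus_ln2_hi = -0x1.62E43p-1f;  // high part of -ln(2)
  const float minus_ln2_lo = 0x1.05C61p-29f;  // low (correction) part of -ln(2)
  const float c1 = 0x1.FFFFF6p-1f, c2 = 0x1.FFFDC6p-2f, c3 = 0x1.555A80p-3f;
  const float c4 = 0x1.573A1Ap-5f, c5 = 0x1.0F9F9Cp-7f;

  float vn = x * log2e + magic_bias;          // n = round(x/ln(2)), held in the low mantissa bits
  uint32_t bits;
  memcpy(&bits, &vn, sizeof(bits));
  bits <<= 23;                                // move n+127 into the exponent field: s = 2^n
  float s;
  memcpy(&s, &bits, sizeof(s));
  vn -= magic_bias;                           // recover n as a float

  float t = vn * minus_ln2_hi + x;            // t = x - n*ln(2), Cody-Waite in two steps
  t = vn * minus_ln2_lo + t;

  float p = c5 * t + c4;                      // p(t) ~= (exp(t) - 1) / t, degree-5 in Horner form
  p = p * t + c3;
  p = p * t + c2;
  p = p * t + c1;

  t *= s;
  return t * p + s;                           // f = s + (t*s)*p ~= s*exp(t) = exp(x)
}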
diff --git a/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc4.c b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc4.c
new file mode 100644
index 000000000..3dd23416a
--- /dev/null
+++ b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32-acc4.c
@@ -0,0 +1,243 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddexpminusmax/avx2-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddexpminusmax.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4(
+ size_t batch,
+ const float* input,
+ float* sum,
+ float max)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+ // The smallest x for which expf(x) is normalized.
+ const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
+ const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+ const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
+ const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
+
+ const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
+ const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
+ const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
+ const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
+ const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m256 vi_max = _mm256_set1_ps(max);
+
+ __m256 vacc0 = _mm256_setzero_ps();
+ __m256 vacc1 = _mm256_setzero_ps();
+ __m256 vacc2 = _mm256_setzero_ps();
+ __m256 vacc3 = _mm256_setzero_ps();
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ // Load 32 (4x8) inputs at a time.
+ const __m256 vi0 = _mm256_loadu_ps(input);
+ const __m256 vi1 = _mm256_loadu_ps(input + 8);
+ const __m256 vi2 = _mm256_loadu_ps(input + 16);
+ const __m256 vi3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+ const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
+ const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+ __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
+ __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+ vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+ vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+ vt2 = _mm256_mul_ps(vt2, vs2);
+ vt3 = _mm256_mul_ps(vt3, vs3);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+ __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
+ __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+ vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
+ vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
+
+ // Accumulate computed exponents.
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc1 = _mm256_add_ps(vacc1, vf1);
+ vacc2 = _mm256_add_ps(vacc2, vf2);
+ vacc3 = _mm256_add_ps(vacc3, vf3);
+ }
+ // Add up all accumulators to vacc0
+ vacc0 = _mm256_add_ps(vacc0, vacc1);
+ vacc2 = _mm256_add_ps(vacc2, vacc3);
+ vacc0 = _mm256_add_ps(vacc0, vacc2);
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ // Load 8 inputs at a time.
+ const __m256 vi = _mm256_loadu_ps(input);
+ input += 8;
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ // Accumulate computed exponents.
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
+
+ // Load up to 7 inputs at a time.
+ const __m256 vi = _mm256_maskload_ps(input, vmask);
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
+ vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
+ }
+  // Reduce the 8 lanes of the SIMD accumulator to a single scalar sum.
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ _mm_store_ss(sum, vacc_lo);
+ _mm256_zeroupper();
+}
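
The _acc4 variant above changes only the accumulation strategy: four independent partial sums hide the latency of the dependent vector adds, and they are folded together pairwise before the single-vector and remainder loops run. The same pattern in a scalar sketch (hypothetical helper, assuming n is a multiple of 4):

  #include <stddef.h>

  static float sum_acc4(const float* v, size_t n) {
    float a0 = 0.0f, a1 = 0.0f, a2 = 0.0f, a3 = 0.0f;
    for (size_t i = 0; i < n; i += 4) {
      a0 += v[i + 0];  // four independent chains keep the adder pipeline busy
      a1 += v[i + 1];
      a2 += v[i + 2];
      a3 += v[i + 3];
    }
    return (a0 + a1) + (a2 + a3);  // pairwise combine, like vacc0..vacc3 above
  }
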
diff --git a/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32.c b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32.c
new file mode 100644
index 000000000..b9bf4ff61
--- /dev/null
+++ b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx2-p5-u32.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddexpminusmax/avx2-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddexpminusmax.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32(
+ size_t batch,
+ const float* input,
+ float* sum,
+ float max)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+ // The smallest x for which expf(x) is normalized.
+ const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
+ const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+ const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
+ const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
+
+ const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
+ const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
+ const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
+ const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
+ const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m256 vi_max = _mm256_set1_ps(max);
+
+ __m256 vacc0 = _mm256_setzero_ps();
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ // Load 32 (4x8) inputs at a time.
+ const __m256 vi0 = _mm256_loadu_ps(input);
+ const __m256 vi1 = _mm256_loadu_ps(input + 8);
+ const __m256 vi2 = _mm256_loadu_ps(input + 16);
+ const __m256 vi3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+ const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
+ const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+ __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
+ __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+ vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+ vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+ vt2 = _mm256_mul_ps(vt2, vs2);
+ vt3 = _mm256_mul_ps(vt3, vs3);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+ __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
+ __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+ vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
+ vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
+
+ // Accumulate computed exponents.
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc0 = _mm256_add_ps(vacc0, vf1);
+ vacc0 = _mm256_add_ps(vacc0, vf2);
+ vacc0 = _mm256_add_ps(vacc0, vf3);
+ }
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ // Load 8 inputs at a time.
+ const __m256 vi = _mm256_loadu_ps(input);
+ input += 8;
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ // Accumulate computed exponents.
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
+
+ // Load up to 7 inputs at a time.
+ const __m256 vi = _mm256_maskload_ps(input, vmask);
+
+ // Subtract maximum input x := i - i_max. This implies x <= 0.
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
+ // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ // Subtract the large number back to get final batch := round(x / log(2)).
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ // Reconstruct the final f value:
+ // f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
+ // = s + (t * s) * p
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ // For inputs below zero cutoff, replace output with +0.0f.
+ // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ // Accumulate computed exponents. And addend with mask to leave unmasked 32-bit lanes unchanged.
+ vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
+ }
+  // Reduce the 8 lanes of the SIMD accumulator to a single scalar sum.
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ _mm_store_ss(sum, vacc_lo);
+ _mm256_zeroupper();
+}
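
Every AVX2 kernel in this change builds the scale s == 2**n with the magic-bias trick: after vn = x * log2(e) + 0x1.8000FEp23f, the low bits of vn's binary representation hold n + 127, so shifting the 32-bit pattern left by 23 yields the IEEE-754 encoding of 2**n. That is only valid for -126 <= n <= 0, which is why the separate denormal-cutoff mask exists. A scalar illustration under those assumptions (the helper name and memcpy-based bit casts are ours, not XNNPACK's):

  #include <stdint.h>
  #include <string.h>

  static float scale_from_magic_bias(float x) {
    float vn = x * 0x1.715476p+0f + 0x1.8000FEp23f;  // the magic bias leaves n + 127 in the low mantissa bits
    uint32_t bits;
    memcpy(&bits, &vn, sizeof(bits));                // bits == 0x4B400000 + (n + 127)
    bits <<= 23;                                     // move (n + 127) into the exponent field
    float s;
    memcpy(&s, &bits, sizeof(s));
    return s;                                        // s == 2**n for -126 <= n <= 0
  }
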
diff --git a/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc2.c b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc2.c
new file mode 100644
index 000000000..fb921ad9f
--- /dev/null
+++ b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc2.c
@@ -0,0 +1,186 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddexpminusmax.h>
+
+
+void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2(
+ size_t batch,
+ const float* input,
+ float* sum,
+ float max)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+ const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
+ const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
+
+ const __m512 vc0 = _mm512_set1_ps(1.0f);
+ const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
+ const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
+ const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
+ const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
+ const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m512 vi_max = _mm512_set1_ps(max);
+
+ __m512 vacc0 = _mm512_setzero_ps();
+ __m512 vacc1 = _mm512_setzero_ps();
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ // Load 64 (4x16) inputs at a time.
+ const __m512 vi0 = _mm512_loadu_ps(input);
+ const __m512 vi1 = _mm512_loadu_ps(input + 16);
+ const __m512 vi2 = _mm512_loadu_ps(input + 32);
+ const __m512 vi3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
+ const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
+ const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
+ const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
+ const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
+ const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
+ const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
+
+ // Accumulate computed exponents.
+ vacc0 = _mm512_add_ps(vacc0, vf0);
+ vacc1 = _mm512_add_ps(vacc1, vf1);
+ vacc0 = _mm512_add_ps(vacc0, vf2);
+ vacc1 = _mm512_add_ps(vacc1, vf3);
+ }
+ // Add up all accumulators to vacc0
+ vacc0 = _mm512_add_ps(vacc0, vacc1);
+
+ __m512 vacc = vacc0;
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ // Load 16 inputs at a time.
+ const __m512 vi = _mm512_loadu_ps(input);
+ input += 16;
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ // Accumulate computed exponents.
+ vacc = _mm512_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ // Prepare mask for valid 32-bit batch (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ // Load up to 15 inputs at a time.
+ const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ // Accumulate computed exponents.
+ vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
+ }
+ *sum = _mm512_reduce_add_ps(vacc);
+}
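
The AVX-512 kernels above need neither the magic bias nor the denormal-cutoff mask: _mm512_roundscale_ps rounds x / ln(2) directly, and _mm512_scalef_ps applies the 2**n scale in hardware, underflowing gracefully to subnormals and zero. Per lane this is equivalent to the following scalar sketch, with ldexpf standing in for scalef and the constants copied from the kernel (the helper name is ours):

  #include <math.h>

  static float expf_p5_scalef_ref(float x) {
    const float n = rintf(x * 0x1.715476p+0f);           // round(x / ln(2)), as roundscale does
    float t = fmaf(n, -0x1.62E43p-1f, x);                // Cody-Waite: subtract n * ln(2), high part
    t = fmaf(n, 0x1.05C61p-29f, t);                      // Cody-Waite: low part
    float p = fmaf(0x1.0F9F9Cp-7f, t, 0x1.573A1Ap-5f);   // degree-5 polynomial, c5 down to c0
    p = fmaf(p, t, 0x1.555A80p-3f);
    p = fmaf(p, t, 0x1.FFFDC6p-2f);
    p = fmaf(p, t, 0x1.FFFFF6p-1f);
    p = fmaf(p, t, 1.0f);
    return ldexpf(p, (int) n);                           // p * 2**n, the job scalef does per lane
  }
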
diff --git a/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc4.c b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc4.c
new file mode 100644
index 000000000..7f6a9691e
--- /dev/null
+++ b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64-acc4.c
@@ -0,0 +1,190 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddexpminusmax.h>
+
+
+void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4(
+ size_t batch,
+ const float* input,
+ float* sum,
+ float max)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+ const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
+ const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
+
+ const __m512 vc0 = _mm512_set1_ps(1.0f);
+ const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
+ const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
+ const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
+ const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
+ const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m512 vi_max = _mm512_set1_ps(max);
+
+ __m512 vacc0 = _mm512_setzero_ps();
+ __m512 vacc1 = _mm512_setzero_ps();
+ __m512 vacc2 = _mm512_setzero_ps();
+ __m512 vacc3 = _mm512_setzero_ps();
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ // Load 64 (4x16) inputs at a time.
+ const __m512 vi0 = _mm512_loadu_ps(input);
+ const __m512 vi1 = _mm512_loadu_ps(input + 16);
+ const __m512 vi2 = _mm512_loadu_ps(input + 32);
+ const __m512 vi3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
+ const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
+ const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
+ const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
+ const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
+ const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
+ const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
+
+ // Accumulate computed exponents.
+ vacc0 = _mm512_add_ps(vacc0, vf0);
+ vacc1 = _mm512_add_ps(vacc1, vf1);
+ vacc2 = _mm512_add_ps(vacc2, vf2);
+ vacc3 = _mm512_add_ps(vacc3, vf3);
+ }
+ // Add up all accumulators to vacc0
+ vacc0 = _mm512_add_ps(vacc0, vacc1);
+ vacc2 = _mm512_add_ps(vacc2, vacc3);
+ vacc0 = _mm512_add_ps(vacc0, vacc2);
+
+ __m512 vacc = vacc0;
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ // Load 16 inputs at a time.
+ const __m512 vi = _mm512_loadu_ps(input);
+ input += 16;
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ // Accumulate computed exponents.
+ vacc = _mm512_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ // Prepare mask for valid 32-bit batch (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ // Load up to 15 inputs at a time.
+ const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ // Accumulate computed exponents.
+ vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
+ }
+ *sum = _mm512_reduce_add_ps(vacc);
+}
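
Remainder handling also differs between the two ISA levels in this change: the AVX2 kernels slide a window over a small int32 mask_table to synthesize a per-lane vector mask for _mm256_maskload_ps, while the AVX-512 kernels build a __mmask16 predicate directly from the remaining element count. A side-by-side sketch under those assumptions (helper names are ours; requires the matching -mavx2 / -mavx512f support):

  #include <immintrin.h>
  #include <stddef.h>
  #include <stdint.h>

  static const int32_t tail_mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

  static __m256i avx2_tail_mask(size_t remaining) {   // remaining in [1, 7] floats
    // Starting at index 7 - remaining, the first `remaining` lanes read -1 and the rest read 0.
    return _mm256_loadu_si256((const __m256i*) &tail_mask_table[7 - remaining]);
  }

  static __mmask16 avx512_tail_mask(size_t remaining) {  // remaining in [1, 15] floats
    return _cvtu32_mask16((uint32_t) ((UINT32_C(1) << remaining) - UINT32_C(1)));
  }
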
diff --git a/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64.c b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64.c
new file mode 100644
index 000000000..bb7a75170
--- /dev/null
+++ b/src/f32-raddexpminusmax/gen/f32-raddexpminusmax-avx512f-p5-scalef-u64.c
@@ -0,0 +1,183 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddexpminusmax/avx512f-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddexpminusmax.h>
+
+
+void xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64(
+ size_t batch,
+ const float* input,
+ float* sum,
+ float max)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+ const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
+ const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
+
+ const __m512 vc0 = _mm512_set1_ps(1.0f);
+ const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
+ const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
+ const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
+ const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
+ const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m512 vi_max = _mm512_set1_ps(max);
+
+ __m512 vacc0 = _mm512_setzero_ps();
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ // Load 64 (4x16) inputs at a time.
+ const __m512 vi0 = _mm512_loadu_ps(input);
+ const __m512 vi1 = _mm512_loadu_ps(input + 16);
+ const __m512 vi2 = _mm512_loadu_ps(input + 32);
+ const __m512 vi3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
+ const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
+ const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
+ const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
+ const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
+ const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
+ const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
+
+ // Accumulate computed exponents.
+ vacc0 = _mm512_add_ps(vacc0, vf0);
+ vacc0 = _mm512_add_ps(vacc0, vf1);
+ vacc0 = _mm512_add_ps(vacc0, vf2);
+ vacc0 = _mm512_add_ps(vacc0, vf3);
+ }
+
+ __m512 vacc = vacc0;
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ // Load 16 inputs at a time.
+ const __m512 vi = _mm512_loadu_ps(input);
+ input += 16;
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ // Accumulate computed exponents.
+ vacc = _mm512_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ // Prepare mask for valid 32-bit batch (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ // Load up to 15 inputs at a time.
+ const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
+
+ // Subtract maximum input x := i - i_max.
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ // Compute reduced argument batch := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ // Compute reduced argument t := x - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Reconstruct the final f value:
+ // f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
+ // = 2**batch * p
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ // Accumulate computed exponents.
+ vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
+ }
+ *sum = _mm512_reduce_add_ps(vacc);
+}
diff --git a/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc2.c b/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc2.c
new file mode 100644
index 000000000..bee42beae
--- /dev/null
+++ b/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc2.c
@@ -0,0 +1,264 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddextexp/avx2-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddextexp.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_raddextexp_ukernel__avx2_p5_u32_acc2(
+ size_t batch,
+ const float* input,
+ float* sum)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+ const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
+ const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
+
+  // The smallest exponent for which 2**exponent is considered non-negligible.
+  // For smaller exponents, 2**exponent is replaced with zero.
+ const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
+ const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+ const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
+
+ const __m256 vc0 = _mm256_set1_ps(1.0f);
+ const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
+ const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
+ const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
+ const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
+ const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
+
+ __m256 vaccv0 = _mm256_setzero_ps();
+ __m256 vaccv1 = _mm256_setzero_ps();
+ __m256 vacce0 = vminus_inf;
+ __m256 vacce1 = vminus_inf;
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ // Load 32 (4x8) inputs at a time.
+ const __m256 vx0 = _mm256_loadu_ps(input);
+ const __m256 vx1 = _mm256_loadu_ps(input + 8);
+ const __m256 vx2 = _mm256_loadu_ps(input + 16);
+ const __m256 vx3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+ // Compute reduced argument batch := round(input / log(2)).
+ const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+ // Compute reduced argument t := input - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
+ // - vnX is "exponent"
+ // - vpX is "mantissa"
+ //
+ // exp2(ae) * av + exp2(be) * bv =
+ // = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
+ // = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
+ // = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
+ //
+ // For computational efficiency we may add several "extended" floating-point numbers at a time.
+ __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
+ __m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
+ vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
+ vmax_e1 = _mm256_max_ps(vmax_e1, vn3);
+
+ // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
+ // This replacement is done in two steps:
+ // 1. Clamp minimum delta_e at -127.0.
+ // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
+ const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
+ const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
+ const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
+ const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
+ const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
+ const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e1), vmin_exponent);
+
+ // Convert delta-exponents into scale factors:
+ // - s = exp2(delta_e) when delta_e > -127.0
+ // - s = 0.0 when delta_e <= -127.0
+ //
+ // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
+ const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
+ const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values
+ vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
+ vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
+ vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
+ vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
+ vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
+ vaccv1 = _mm256_fmadd_ps(vp3, vs3, vaccv1);
+
+ vacce0 = vmax_e0;
+ vacce1 = vmax_e1;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
+ const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
+
+ const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce01), vmin_exponent);
+ const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce01), vmin_exponent);
+
+ const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
+ const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
+
+ __m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
+ vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
+ __m256 vacce = vmax_acce01;
+
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ // Load 8 inputs at a time.
+ const __m256 vx = _mm256_loadu_ps(input);
+ input += 8;
+
+ // Compute reduced argument batch := round(input / log(2)).
+ const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+ // Compute reduced argument t := input - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+ vp = _mm256_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m256 vmax_e = _mm256_max_ps(vacce, vn);
+
+    // For computational efficiency, clamp delta_e at a minimum of -127.0; that value is mapped to a 0.0 scale factor later.
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
+ const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
+
+ // Convert exponents into scale factors.
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values.
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
+
+ vacce = vmax_e;
+ }
+ if XNN_UNLIKELY(batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
+
+ // Load up to 7 inputs at a time.
+ const __m256 vx = _mm256_maskload_ps(input, vmask);
+
+ // Compute reduced argument batch := round(input / log(2)).
+ __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+ // Compute reduced argument t := input - batch * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+    // For masked-out lanes, replace the reduced argument with the accumulator exponent so those lanes leave the accumulator unchanged.
+ vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+ vp = _mm256_fmadd_ps(vp, vt, vc0);
+ vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m256 vmax_e = _mm256_max_ps(vacce, vn);
+
+    // For computational efficiency, clamp delta_e at a minimum of -127.0; that value is mapped to a 0.0 scale factor later.
+ const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
+
+ // Convert exponents into scale factors.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values.
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
+
+ vacce = vmax_e;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
+ __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
+ vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
+ vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
+ vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
+ vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
+
+ _mm_store_ss(&sum[0], vaccv_sum);
+ _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
+
+ _mm256_zeroupper();
+}
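
The raddextexp kernels drop the subtract-the-max step entirely: each running sum is an extended pair (v, e) meaning v * 2**e, and pairs are combined by rescaling everything to the larger exponent, with exponent deltas clamped at -127 so negligible terms end up with a zero scale factor. A scalar sketch of that combining step (the extfloat type is illustrative; exp2f yields a tiny subnormal at -127 where the kernel's bit trick yields an exact 0.0f):

  #include <math.h>

  typedef struct { float v; float e; } extfloat;  // represents v * 2**e

  static extfloat extfloat_add(extfloat a, extfloat b) {
    const float max_e = fmaxf(a.e, b.e);
    const float da = fmaxf(a.e - max_e, -127.0f);  // clamp the delta exponents at -127
    const float db = fmaxf(b.e - max_e, -127.0f);
    const extfloat r = { a.v * exp2f(da) + b.v * exp2f(db), max_e };
    return r;
  }

The final lines of the kernel perform the same rescaling across lanes, then store the reduced mantissa in sum[0] and the shared exponent in sum[1].
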
diff --git a/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc4.c b/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc4.c
new file mode 100644
index 000000000..9a7b695fa
--- /dev/null
+++ b/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32-acc4.c
@@ -0,0 +1,284 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddextexp/avx2-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddextexp.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_raddextexp_ukernel__avx2_p5_u32_acc4(
+ size_t batch,
+ const float* input,
+ float* sum)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+ const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
+ const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
+
+  // The smallest exponent for which 2**exponent is considered non-negligible.
+  // For smaller exponents, 2**exponent is replaced with zero.
+ const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
+ const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+ const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
+
+ const __m256 vc0 = _mm256_set1_ps(1.0f);
+ const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
+ const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
+ const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
+ const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
+ const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
+
+ __m256 vaccv0 = _mm256_setzero_ps();
+ __m256 vaccv1 = _mm256_setzero_ps();
+ __m256 vaccv2 = _mm256_setzero_ps();
+ __m256 vaccv3 = _mm256_setzero_ps();
+ __m256 vacce0 = vminus_inf;
+ __m256 vacce1 = vminus_inf;
+ __m256 vacce2 = vminus_inf;
+ __m256 vacce3 = vminus_inf;
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ // Load 32 (4x8) inputs at a time.
+ const __m256 vx0 = _mm256_loadu_ps(input);
+ const __m256 vx1 = _mm256_loadu_ps(input + 8);
+ const __m256 vx2 = _mm256_loadu_ps(input + 16);
+ const __m256 vx3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
+ // - vnX is "exponent"
+ // - vpX is "mantissa"
+ //
+ // exp2(ae) * av + exp2(be) * bv =
+ // = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
+ // = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
+ // = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
+ //
+ // For computational efficiency we may add several "extended" floating-point numbers at a time.
+ __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
+ __m256 vmax_e1 = _mm256_max_ps(vacce1, vn1);
+ __m256 vmax_e2 = _mm256_max_ps(vacce2, vn2);
+ __m256 vmax_e3 = _mm256_max_ps(vacce3, vn3);
+
+ // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
+ // This replacement is done in two steps:
+ // 1. Clamp minimum delta_e at -127.0.
+ // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
+ const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
+ const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_e1), vmin_exponent);
+ const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_e2), vmin_exponent);
+ const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_e3), vmin_exponent);
+ const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
+ const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e1), vmin_exponent);
+ const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e2), vmin_exponent);
+ const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e3), vmin_exponent);
+
+ // Convert delta-exponents into scale factors:
+ // - s = exp2(delta_e) when delta_e > -127.0
+ // - s = 0.0 when delta_e <= -127.0
+ //
+ // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
+ const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
+ const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
+ const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
+ const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values
+ vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
+ vaccv1 = _mm256_mul_ps(vaccv1, vaccs1);
+ vaccv2 = _mm256_mul_ps(vaccv2, vaccs2);
+ vaccv3 = _mm256_mul_ps(vaccv3, vaccs3);
+ vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
+ vaccv1 = _mm256_fmadd_ps(vp1, vs1, vaccv1);
+ vaccv2 = _mm256_fmadd_ps(vp2, vs2, vaccv2);
+ vaccv3 = _mm256_fmadd_ps(vp3, vs3, vaccv3);
+
+ vacce0 = vmax_e0;
+ vacce1 = vmax_e1;
+ vacce2 = vmax_e2;
+ vacce3 = vmax_e3;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
+ const __m256 vmax_acce01 = _mm256_max_ps(vacce0, vacce1);
+ const __m256 vmax_acce23 = _mm256_max_ps(vacce2, vacce3);
+ const __m256 vmax_acce0123 = _mm256_max_ps(vmax_acce01, vmax_acce23);
+
+ const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_acce0123), vmin_exponent);
+ const __m256 vdelta_acce1 = _mm256_max_ps(_mm256_sub_ps(vacce1, vmax_acce0123), vmin_exponent);
+ const __m256 vdelta_acce2 = _mm256_max_ps(_mm256_sub_ps(vacce2, vmax_acce0123), vmin_exponent);
+ const __m256 vdelta_acce3 = _mm256_max_ps(_mm256_sub_ps(vacce3, vmax_acce0123), vmin_exponent);
+
+ const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
+ const __m256 vaccs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce1, vmagic_bias)), 23));
+ const __m256 vaccs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce2, vmagic_bias)), 23));
+ const __m256 vaccs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce3, vmagic_bias)), 23));
+
+ __m256 vaccv = _mm256_mul_ps(vaccv0, vaccs0);
+ vaccv = _mm256_fmadd_ps(vaccv1, vaccs1, vaccv);
+ vaccv = _mm256_fmadd_ps(vaccv2, vaccs2, vaccv);
+ vaccv = _mm256_fmadd_ps(vaccv3, vaccs3, vaccv);
+ __m256 vacce = vmax_acce0123;
+
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ // Load 8 inputs at a time.
+ const __m256 vx = _mm256_loadu_ps(input);
+ input += 8;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+ vp = _mm256_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m256 vmax_e = _mm256_max_ps(vacce, vn);
+
+    // For computational efficiency, clamp minimum delta_e at -127.0. It will be mapped to a 0.0 scale factor later.
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
+ const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
+
+ // Convert exponents into scale factors.
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values.
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
+
+ vacce = vmax_e;
+ }
+ if XNN_UNLIKELY(batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
+
+ // Load up to 7 inputs at a time.
+ const __m256 vx = _mm256_maskload_ps(input, vmask);
+
+    // Compute reduced argument n := round(x / log(2)).
+ __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+    // Correct reduced argument n for masked-out elements.
+ vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+ vp = _mm256_fmadd_ps(vp, vt, vc0);
+ vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m256 vmax_e = _mm256_max_ps(vacce, vn);
+
+    // For computational efficiency, clamp minimum delta_e at -127.0. It will be mapped to a 0.0 scale factor later.
+ const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
+
+ // Convert exponents into scale factors.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values.
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
+
+ vacce = vmax_e;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
+ __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
+ vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
+ vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
+ vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
+ vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
+
+ _mm_store_ss(&sum[0], vaccv_sum);
+ _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
+
+ _mm256_zeroupper();
+}
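
The range reduction and polynomial evaluation in the loops above follow a standard pattern; the scalar sketch below uses the same constants as the kernels (vlog2e, vminus_ln2_hi/lo, vc1..vc5) but is only a rough reference, not the vectorized implementation.

#include <math.h>

/* Scalar sketch: exp(x) ~= p(t) * 2**n with n = round(x / ln(2)) and
 * t = x - n*ln(2), where ln(2) is split into a high part and a small
 * correction (Cody-Waite) so the product n*ln2_hi is exact. */
static float exp_reduced(float x, float* n_out) {
  const float n = rintf(x * 0x1.715476p+0f);          /* x * log2(e), rounded to nearest */
  float t = fmaf(n, -0x1.62E43p-1f, x);               /* subtract n * ln2_hi */
  t = fmaf(n, 0x1.05C61p-29f, t);                     /* apply n * ln2_lo correction */
  float p = fmaf(0x1.0F9F9Cp-7f, t, 0x1.573A1Ap-5f);  /* degree-5 polynomial on [-ln2/2, ln2/2] */
  p = fmaf(p, t, 0x1.555A80p-3f);
  p = fmaf(p, t, 0x1.FFFDC6p-2f);
  p = fmaf(p, t, 0x1.FFFFF6p-1f);
  p = fmaf(p, t, 1.0f);
  *n_out = n;
  return p;  /* exp(x) ~= p * 2**n */
}
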
diff --git a/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32.c b/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32.c
new file mode 100644
index 000000000..8f391f369
--- /dev/null
+++ b/src/f32-raddextexp/gen/f32-raddextexp-avx2-p5-u32.c
@@ -0,0 +1,249 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddextexp/avx2-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddextexp.h>
+
+
+static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
+
+void xnn_f32_raddextexp_ukernel__avx2_p5_u32(
+ size_t batch,
+ const float* input,
+ float* sum)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
+ const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
+ const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
+
+  // The smallest delta-exponent for which 2**delta_e is considered non-negligible.
+  // For smaller delta-exponents, 2**delta_e is replaced with zero.
+ const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
+ const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
+ const __m256 vminus_inf = _mm256_set1_ps(-INFINITY);
+
+ const __m256 vc0 = _mm256_set1_ps(1.0f);
+ const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
+ const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
+ const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
+ const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
+ const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
+
+ __m256 vaccv0 = _mm256_setzero_ps();
+ __m256 vacce0 = vminus_inf;
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ // Load 32 (4x8) inputs at a time.
+ const __m256 vx0 = _mm256_loadu_ps(input);
+ const __m256 vx1 = _mm256_loadu_ps(input + 8);
+ const __m256 vx2 = _mm256_loadu_ps(input + 16);
+ const __m256 vx3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
+ // - vnX is "exponent"
+ // - vpX is "mantissa"
+ //
+ // exp2(ae) * av + exp2(be) * bv =
+ // = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
+ // = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
+ // = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
+ //
+ // For computational efficiency we may add several "extended" floating-point numbers at a time.
+ __m256 vmax_e0 = _mm256_max_ps(vacce0, vn0);
+ vmax_e0 = _mm256_max_ps(vmax_e0, vn1);
+ vmax_e0 = _mm256_max_ps(vmax_e0, vn2);
+ vmax_e0 = _mm256_max_ps(vmax_e0, vn3);
+
+ // For computational efficiency, replace exp2(delta_e) with 0.0f when delta_e <= -127.0.
+ // This replacement is done in two steps:
+ // 1. Clamp minimum delta_e at -127.0.
+ // 2. Map delta_e to scale factor 0.0 when delta_e == -127.0
+ const __m256 vdelta_acce0 = _mm256_max_ps(_mm256_sub_ps(vacce0, vmax_e0), vmin_exponent);
+ const __m256 vdelta_e0 = _mm256_max_ps(_mm256_sub_ps(vn0, vmax_e0), vmin_exponent);
+ const __m256 vdelta_e1 = _mm256_max_ps(_mm256_sub_ps(vn1, vmax_e0), vmin_exponent);
+ const __m256 vdelta_e2 = _mm256_max_ps(_mm256_sub_ps(vn2, vmax_e0), vmin_exponent);
+ const __m256 vdelta_e3 = _mm256_max_ps(_mm256_sub_ps(vn3, vmax_e0), vmin_exponent);
+
+ // Convert delta-exponents into scale factors:
+ // - s = exp2(delta_e) when delta_e > -127.0
+ // - s = 0.0 when delta_e <= -127.0
+ //
+ // Note: delta-exponents can not exceed 0.0, thus scale factors can not exceed 1.0.
+ const __m256 vaccs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce0, vmagic_bias)), 23));
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e0, vmagic_bias)), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e1, vmagic_bias)), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e2, vmagic_bias)), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e3, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values
+ vaccv0 = _mm256_mul_ps(vaccv0, vaccs0);
+ vaccv0 = _mm256_fmadd_ps(vp0, vs0, vaccv0);
+ vaccv0 = _mm256_fmadd_ps(vp1, vs1, vaccv0);
+ vaccv0 = _mm256_fmadd_ps(vp2, vs2, vaccv0);
+ vaccv0 = _mm256_fmadd_ps(vp3, vs3, vaccv0);
+
+ vacce0 = vmax_e0;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
+ __m256 vaccv = vaccv0;
+ __m256 vacce = vacce0;
+
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ // Load 8 inputs at a time.
+ const __m256 vx = _mm256_loadu_ps(input);
+ input += 8;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+ vp = _mm256_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m256 vmax_e = _mm256_max_ps(vacce, vn);
+
+    // For computational efficiency, clamp minimum delta_e at -127.0. It will be mapped to a 0.0 scale factor later.
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
+ const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
+
+ // Convert exponents into scale factors.
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values.
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
+
+ vacce = vmax_e;
+ }
+ if XNN_UNLIKELY(batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
+
+ // Load up to 7 inputs at a time.
+ const __m256 vx = _mm256_maskload_ps(input, vmask);
+
+    // Compute reduced argument n := round(x / log(2)).
+ __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+    // Correct reduced argument n for masked-out elements.
+ vn = _mm256_blendv_ps(vacce, vn, _mm256_castsi256_ps(vmask));
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+ vp = _mm256_fmadd_ps(vp, vt, vc0);
+ vp = _mm256_and_ps(vp, _mm256_castsi256_ps(vmask));
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m256 vmax_e = _mm256_max_ps(vacce, vn);
+
+    // For computational efficiency, clamp minimum delta_e at -127.0. It will be mapped to a 0.0 scale factor later.
+ const __m256 vdelta_e = _mm256_max_ps(_mm256_sub_ps(vn, vmax_e), vmin_exponent);
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_e), vmin_exponent);
+
+ // Convert exponents into scale factors.
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_e, vmagic_bias)), 23));
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+
+ // Update accumulated "mantissa" and "exponent" values.
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ vaccv = _mm256_fmadd_ps(vp, vs, vaccv);
+
+ vacce = vmax_e;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
+ __m256 vmax_acce = _mm256_max_ps(vacce, _mm256_permute2f128_ps(vacce, vacce, 1));
+ vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(1, 0, 3, 2)));
+ vmax_acce = _mm256_max_ps(vmax_acce, _mm256_shuffle_ps(vmax_acce, vmax_acce, _MM_SHUFFLE(2, 3, 0, 1)));
+ const __m256 vdelta_acce = _mm256_max_ps(_mm256_sub_ps(vacce, vmax_acce), vmin_exponent);
+ const __m256 vaccs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(vdelta_acce, vmagic_bias)), 23));
+
+ vaccv = _mm256_mul_ps(vaccv, vaccs);
+ __m128 vaccv_sum = _mm_add_ps(_mm256_castps256_ps128(vaccv), _mm256_extractf128_ps(vaccv, 1));
+ vaccv_sum = _mm_add_ps(vaccv_sum, _mm_movehl_ps(vaccv_sum, vaccv_sum));
+ vaccv_sum = _mm_add_ss(vaccv_sum, _mm_movehdup_ps(vaccv_sum));
+
+ _mm_store_ss(&sum[0], vaccv_sum);
+ _mm_store_ss(&sum[1], _mm256_castps256_ps128(vmax_acce));
+
+ _mm256_zeroupper();
+}
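
The slli/magic-bias sequence that appears throughout the AVX2 kernels converts an integer-valued delta-exponent in [-127, 0] directly into the scale factor 2**delta, with -127 mapping to an exact 0.0f. A minimal scalar sketch of that bit trick, using the same 0x1.8000FEp23f constant, is below; it assumes delta is integer-valued and already clamped.

#include <stdint.h>
#include <string.h>

static float scale_from_delta(float delta) {
  /* Adding the magic bias leaves (delta + 127) in the low bits of the
   * significand; shifting those bits into the exponent field yields
   * 2**delta, and delta == -127 becomes an exact zero. */
  const float biased = delta + 0x1.8000FEp23f;
  uint32_t bits;
  memcpy(&bits, &biased, sizeof(bits));
  bits <<= 23;
  float scale;
  memcpy(&scale, &bits, sizeof(scale));
  return scale;  /* scale_from_delta(0.0f) == 1.0f, scale_from_delta(-127.0f) == 0.0f */
}
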
diff --git a/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc2.c b/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc2.c
new file mode 100644
index 000000000..993742c7e
--- /dev/null
+++ b/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc2.c
@@ -0,0 +1,212 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddextexp.h>
+
+
+void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_u64_acc2(
+ size_t batch,
+ const float* input,
+ float* sum)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+ const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
+ const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
+
+ const __m512 vc0 = _mm512_set1_ps(1.0f);
+ const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
+ const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
+ const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
+ const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
+ const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
+
+ __m512 vaccv0 = _mm512_setzero_ps();
+ __m512 vaccv1 = _mm512_setzero_ps();
+ __m512 vacce0 = vminus_inf;
+ __m512 vacce1 = vminus_inf;
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ // Load 64 (4x16) inputs at a time.
+ const __m512 vx0 = _mm512_loadu_ps(input);
+ const __m512 vx1 = _mm512_loadu_ps(input + 16);
+ const __m512 vx2 = _mm512_loadu_ps(input + 32);
+ const __m512 vx3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
+ // - vnX is "exponent"
+ // - vpX is "mantissa"
+ //
+ // exp2(ae) * av + exp2(be) * bv =
+ // = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
+ // = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
+ // = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
+ //
+ // For computational efficiency we add three "extended" floating-point numbers at a time.
+ __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
+ __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
+ vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
+ vmax_e1 = _mm512_max_ps(vmax_e1, vn3);
+
+ const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
+ const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
+ const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
+ const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
+ const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
+ const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e1);
+
+ // Update accumulated "mantissa" and "exponent" values
+ vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
+ vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
+ vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
+ vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
+ vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
+ vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp3, vdelta_e3));
+
+ vacce0 = vmax_e0;
+ vacce1 = vmax_e1;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
+ const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
+
+ const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce01);
+ const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce01);
+
+ __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
+ vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
+ __m512 vacce = vmax_acce01;
+
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ // Load 16 inputs at a time.
+ const __m512 vx = _mm512_loadu_ps(input);
+ input += 16;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m512 vmax_e = _mm512_max_ps(vacce, vn);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
+ const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
+ vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
+ vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
+
+ vacce = vmax_e;
+ }
+ if XNN_UNLIKELY(batch != 0) {
+    // Prepare mask for the valid 32-bit elements (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ // Load up to 15 inputs at a time.
+ const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
+ const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
+ vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
+ vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
+ vacce = vmax_e;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
+ const float vmax_acce = _mm512_reduce_max_ps(vacce);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
+
+ sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
+ sum[1] = vmax_acce;
+
+ _mm256_zeroupper();
+}
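
The AVX-512 kernels replace the magic-bias trick with _mm512_scalef_ps, which multiplies by 2**delta directly. Below is a scalar sketch of one accumulator update, with scalbnf standing in for scalef; the names are illustrative, and the isinf guard only protects the (int) cast while the accumulator still holds its -INFINITY initial exponent.

#include <math.h>

static void ext_accumulate(float* accv, float* acce, float p, float n) {
  const float max_e = fmaxf(*acce, n);
  const float delta_acce = *acce - max_e;  /* <= 0, -inf before the first update */
  const float delta_e = n - max_e;         /* <= 0 and finite */
  const float scaled_acc =
      isinf(delta_acce) ? 0.0f : scalbnf(*accv, (int) delta_acce);
  *accv = scaled_acc + scalbnf(p, (int) delta_e);
  *acce = max_e;
}
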
diff --git a/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc4.c b/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc4.c
new file mode 100644
index 000000000..4c67cdd45
--- /dev/null
+++ b/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64-acc4.c
@@ -0,0 +1,228 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddextexp.h>
+
+
+void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_u64_acc4(
+ size_t batch,
+ const float* input,
+ float* sum)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+ const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
+ const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
+
+ const __m512 vc0 = _mm512_set1_ps(1.0f);
+ const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
+ const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
+ const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
+ const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
+ const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
+
+ __m512 vaccv0 = _mm512_setzero_ps();
+ __m512 vaccv1 = _mm512_setzero_ps();
+ __m512 vaccv2 = _mm512_setzero_ps();
+ __m512 vaccv3 = _mm512_setzero_ps();
+ __m512 vacce0 = vminus_inf;
+ __m512 vacce1 = vminus_inf;
+ __m512 vacce2 = vminus_inf;
+ __m512 vacce3 = vminus_inf;
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ // Load 64 (4x16) inputs at a time.
+ const __m512 vx0 = _mm512_loadu_ps(input);
+ const __m512 vx1 = _mm512_loadu_ps(input + 16);
+ const __m512 vx2 = _mm512_loadu_ps(input + 32);
+ const __m512 vx3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
+ // - vnX is "exponent"
+ // - vpX is "mantissa"
+ //
+ // exp2(ae) * av + exp2(be) * bv =
+ // = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
+ // = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
+ // = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
+ //
+    // For computational efficiency we add two "extended" floating-point numbers at a time.
+ __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
+ __m512 vmax_e1 = _mm512_max_ps(vacce1, vn1);
+ __m512 vmax_e2 = _mm512_max_ps(vacce2, vn2);
+ __m512 vmax_e3 = _mm512_max_ps(vacce3, vn3);
+
+ const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
+ const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_e1);
+ const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_e2);
+ const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_e3);
+ const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
+ const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e1);
+ const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e2);
+ const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e3);
+
+ // Update accumulated "mantissa" and "exponent" values
+ vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
+ vaccv1 = _mm512_scalef_ps(vaccv1, vdelta_acce1);
+ vaccv2 = _mm512_scalef_ps(vaccv2, vdelta_acce2);
+ vaccv3 = _mm512_scalef_ps(vaccv3, vdelta_acce3);
+ vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
+ vaccv1 = _mm512_add_ps(vaccv1, _mm512_scalef_ps(vp1, vdelta_e1));
+ vaccv2 = _mm512_add_ps(vaccv2, _mm512_scalef_ps(vp2, vdelta_e2));
+ vaccv3 = _mm512_add_ps(vaccv3, _mm512_scalef_ps(vp3, vdelta_e3));
+
+ vacce0 = vmax_e0;
+ vacce1 = vmax_e1;
+ vacce2 = vmax_e2;
+ vacce3 = vmax_e3;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
+ const __m512 vmax_acce01 = _mm512_max_ps(vacce0, vacce1);
+ const __m512 vmax_acce23 = _mm512_max_ps(vacce2, vacce3);
+ const __m512 vmax_acce0123 = _mm512_max_ps(vmax_acce01, vmax_acce23);
+
+ const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_acce0123);
+ const __m512 vdelta_acce1 = _mm512_sub_ps(vacce1, vmax_acce0123);
+ const __m512 vdelta_acce2 = _mm512_sub_ps(vacce2, vmax_acce0123);
+ const __m512 vdelta_acce3 = _mm512_sub_ps(vacce3, vmax_acce0123);
+
+ __m512 vaccv = _mm512_scalef_ps(vaccv0, vdelta_acce0);
+ vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv1, vdelta_acce1));
+ vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv2, vdelta_acce2));
+ vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vaccv3, vdelta_acce3));
+ __m512 vacce = vmax_acce0123;
+
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ // Load 16 inputs at a time.
+ const __m512 vx = _mm512_loadu_ps(input);
+ input += 16;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m512 vmax_e = _mm512_max_ps(vacce, vn);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
+ const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
+ vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
+ vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
+
+ vacce = vmax_e;
+ }
+ if XNN_UNLIKELY(batch != 0) {
+    // Prepare mask for the valid 32-bit elements (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ // Load up to 15 inputs at a time.
+ const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
+ const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
+ vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
+ vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
+ vacce = vmax_e;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
+ const float vmax_acce = _mm512_reduce_max_ps(vacce);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
+
+ sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
+ sum[1] = vmax_acce;
+
+ _mm256_zeroupper();
+}
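
The final reduction in these kernels picks the largest per-lane exponent, rescales every lane's mantissa onto it, and sums, so that sum[0] holds the combined mantissa and sum[1] the shared exponent. A scalar sketch of that step (illustrative names only, with scalbnf again standing in for scalef):

#include <math.h>

static float ext_reduce(const float* accv, const float* acce, int lanes,
                        float* max_e_out) {
  float max_e = -INFINITY;
  for (int i = 0; i < lanes; i++) {
    max_e = fmaxf(max_e, acce[i]);
  }
  float total = 0.0f;
  for (int i = 0; i < lanes; i++) {
    const float delta = acce[i] - max_e;  /* <= 0 */
    total += isinf(delta) ? 0.0f : scalbnf(accv[i], (int) delta);
  }
  *max_e_out = max_e;  /* the reduced value is total * 2**max_e */
  return total;
}
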
diff --git a/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64.c b/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64.c
new file mode 100644
index 000000000..2b817af75
--- /dev/null
+++ b/src/f32-raddextexp/gen/f32-raddextexp-avx512f-p5-scalef-u64.c
@@ -0,0 +1,201 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddextexp/avx512f-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+#include <math.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddextexp.h>
+
+
+void xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_u64(
+ size_t batch,
+ const float* input,
+ float* sum)
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(sum != NULL);
+
+ const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
+ const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
+ const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
+
+ const __m512 vc0 = _mm512_set1_ps(1.0f);
+ const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
+ const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
+ const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
+ const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
+ const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
+
+ const __m512 vminus_inf = _mm512_set1_ps(-INFINITY);
+
+ __m512 vaccv0 = _mm512_setzero_ps();
+ __m512 vacce0 = vminus_inf;
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ // Load 64 (4x16) inputs at a time.
+ const __m512 vx0 = _mm512_loadu_ps(input);
+ const __m512 vx1 = _mm512_loadu_ps(input + 16);
+ const __m512 vx2 = _mm512_loadu_ps(input + 32);
+ const __m512 vx3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
+ __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
+ __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
+ __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
+
+ vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
+ vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
+ vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
+ vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation where
+ // - vnX is "exponent"
+ // - vpX is "mantissa"
+ //
+ // exp2(ae) * av + exp2(be) * bv =
+ // = exp2(max(ae, be)) * exp2(ae - max(ae, be)) * av + exp2(max(ae, be)) * exp2(be - max(ae, be)) * bv
+ // = exp2(max_e) * (exp2(ae - max_e) * av + exp2(be - max_e) * bv)
+ // = exp2(max_e) * (exp2(delta_ae) * av + exp2(delta_be) * bv)
+ //
+    // For computational efficiency we add five "extended" floating-point numbers at a time.
+ __m512 vmax_e0 = _mm512_max_ps(vacce0, vn0);
+ vmax_e0 = _mm512_max_ps(vmax_e0, vn1);
+ vmax_e0 = _mm512_max_ps(vmax_e0, vn2);
+ vmax_e0 = _mm512_max_ps(vmax_e0, vn3);
+
+ const __m512 vdelta_acce0 = _mm512_sub_ps(vacce0, vmax_e0);
+ const __m512 vdelta_e0 = _mm512_sub_ps(vn0, vmax_e0);
+ const __m512 vdelta_e1 = _mm512_sub_ps(vn1, vmax_e0);
+ const __m512 vdelta_e2 = _mm512_sub_ps(vn2, vmax_e0);
+ const __m512 vdelta_e3 = _mm512_sub_ps(vn3, vmax_e0);
+
+ // Update accumulated "mantissa" and "exponent" values
+ vaccv0 = _mm512_scalef_ps(vaccv0, vdelta_acce0);
+ vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp0, vdelta_e0));
+ vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp1, vdelta_e1));
+ vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp2, vdelta_e2));
+ vaccv0 = _mm512_add_ps(vaccv0, _mm512_scalef_ps(vp3, vdelta_e3));
+
+ vacce0 = vmax_e0;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" SIMD vector of sums.
+ __m512 vaccv = vaccv0;
+ __m512 vacce = vacce0;
+
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ // Load 16 inputs at a time.
+ const __m512 vx = _mm512_loadu_ps(input);
+ input += 16;
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m512 vmax_e = _mm512_max_ps(vacce, vn);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
+ const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
+ vaccv = _mm512_scalef_ps(vaccv, vdelta_acce);
+ vaccv = _mm512_add_ps(vaccv, _mm512_scalef_ps(vp, vdelta_e));
+
+ vacce = vmax_e;
+ }
+ if XNN_UNLIKELY(batch != 0) {
+    // Prepare mask for the valid 32-bit elements (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ // Load up to 15 inputs at a time.
+ const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
+
+    // Compute reduced argument n := round(x / log(2)).
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+    // Compute reduced argument t := x - n * log(2).
+ // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
+ __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
+ vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
+
+ // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ // Accumulate "extended" floating-point numbers in ("mantissa", "exponent") representation.
+ const __m512 vmax_e = _mm512_mask_max_ps(vacce, vmask, vacce, vn);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, vmax_e);
+ const __m512 vdelta_e = _mm512_sub_ps(vn, vmax_e);
+ vaccv = _mm512_mask_scalef_ps(vaccv, vmask, vaccv, vdelta_acce);
+ vaccv = _mm512_mask_add_ps(vaccv, vmask, vaccv, _mm512_maskz_scalef_ps(vmask, vp, vdelta_e));
+ vacce = vmax_e;
+ }
+
+ // Reduce partial sums of "extended" floating-point numbers into a single "extended" floating-point sum.
+ const float vmax_acce = _mm512_reduce_max_ps(vacce);
+ const __m512 vdelta_acce = _mm512_sub_ps(vacce, _mm512_set1_ps(vmax_acce));
+
+ sum[0] = _mm512_reduce_add_ps(_mm512_scalef_ps(vaccv, vdelta_acce));
+ sum[1] = vmax_acce;
+
+ _mm256_zeroupper();
+}
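
The remainder path above first converts the byte count to a lane count and then builds a per-lane bitmask for the masked load and masked updates. A small sketch of that mask arithmetic (tail_mask is an illustrative helper, not an XNNPACK API):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t tail_mask(size_t batch_bytes) {
  assert(batch_bytes != 0);
  assert(batch_bytes < 16 * sizeof(float));
  const size_t lanes = batch_bytes / sizeof(float);  /* 1..15 remaining floats */
  return (uint16_t) ((UINT32_C(1) << lanes) - UINT32_C(1));
}
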
diff --git a/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc2.c b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc2.c
new file mode 100644
index 000000000..8cb194533
--- /dev/null
+++ b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc2.c
@@ -0,0 +1,204 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2(
+ size_t batch,
+ const float* input,
+ const float* max,
+ float* output,
+ float* sum,
+ const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m256 vi_max = _mm256_broadcast_ss(max);
+ const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
+ const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
+ const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
+ const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
+ const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
+ const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
+ const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
+ const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
+ const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
+
+ __m256 vacc0 = _mm256_setzero_ps();
+ __m256 vacc1 = _mm256_setzero_ps();
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ const __m256 vi0 = _mm256_loadu_ps(input);
+ const __m256 vi1 = _mm256_loadu_ps(input + 8);
+ const __m256 vi2 = _mm256_loadu_ps(input + 16);
+ const __m256 vi3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+ const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
+ const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
+
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+ __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
+ __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
+
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+ vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+ vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
+
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+ vt2 = _mm256_mul_ps(vt2, vs2);
+ vt3 = _mm256_mul_ps(vt3, vs3);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+ __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
+ __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
+
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+ vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
+ vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
+
+ _mm256_storeu_ps(output, vf0);
+ _mm256_storeu_ps(output + 8, vf1);
+ _mm256_storeu_ps(output + 16, vf2);
+ _mm256_storeu_ps(output + 24, vf3);
+ output += 32;
+
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc1 = _mm256_add_ps(vacc1, vf1);
+ vacc0 = _mm256_add_ps(vacc0, vf2);
+ vacc1 = _mm256_add_ps(vacc1, vf3);
+ }
+ vacc0 = _mm256_add_ps(vacc0, vacc1);
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ const __m256 vi = _mm256_loadu_ps(input);
+ input += 8;
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ _mm256_storeu_ps(output, vf);
+ output += 8;
+
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
+
+ const __m256 vi = _mm256_maskload_ps(input, vmask);
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ __m128 vf_lo = _mm256_castps256_ps128(vf);
+ if (batch & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, vf_lo);
+ vf_lo = _mm256_extractf128_ps(vf, 1);
+ output += 4;
+ }
+ if (batch & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, vf_lo);
+ vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
+ output += 2;
+ }
+ if (batch & (1 * sizeof(float))) {
+ _mm_store_ss(output, vf_lo);
+ }
+
+ vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
+ }
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ _mm_store_ss(sum, vacc_lo);
+ _mm256_zeroupper();
+}
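
For reference while reading the AVX2 rr1-p5 kernels in this change, the per-element computation is: n = round(x*log2(e)) obtained by adding a "magic bias", 2**n rebuilt by shifting the biased integer into the float exponent field, a degree-5 polynomial for the reduced argument, and a cutoff that flushes would-be denormals to zero. The following is a minimal scalar sketch of that scheme, assuming round-to-nearest mode; the coefficients are illustrative stand-ins (plain Taylor terms and a hand-picked cutoff), not the tuned minimax values the kernels load from params.

#include <stdint.h>
#include <string.h>

// Scalar sketch of one lane of the AVX2 rr1-p5 kernels (illustrative constants).
static float expminus_rr1_p5_sketch(float x /* input - max, so x <= 0 */) {
  const float log2e      = 0x1.715476p+0f;   // log2(e)
  const float magic_bias = 0x1.8000FEp23f;   // after the add, the low mantissa byte holds n + 127
  const float minus_ln2  = -0x1.62E430p-1f;  // -ln(2), applied in a single step ("rr1")

  // n = round(x * log2e): adding the magic bias moves the value into a range
  // where the float grid spacing is 1.0, so the rounding happens for free.
  float vn = x * log2e + magic_bias;

  // s = 2**n: shift the biased integer sitting in vn's low bits into the
  // exponent field of a fresh float (the _mm256_slli_epi32(..., 23) step above).
  uint32_t bits;
  memcpy(&bits, &vn, sizeof(bits));
  bits <<= 23;
  float vs;
  memcpy(&vs, &bits, sizeof(vs));

  vn -= magic_bias;                   // recover n as a float
  float vt = vn * minus_ln2 + x;      // reduced argument t = x - n*ln2

  // Degree-5 polynomial in Horner form (Taylor coefficients here; the kernels
  // use minimax-optimized c1..c5 from params).
  float vp = 1.0f / 120.0f;
  vp = vp * vt + 1.0f / 24.0f;
  vp = vp * vt + 1.0f / 6.0f;
  vp = vp * vt + 1.0f / 2.0f;
  vp = vp * vt + 1.0f;

  vt *= vs;
  float vf = vt * vp + vs;            // exp(x) = s + s*t*p(t)

  // Inputs far below the maximum would land in the denormal range; flush them
  // to zero, mirroring the _mm256_cmp_ps/_mm256_andnot_ps pair in the kernels.
  if (x < -0x1.5D589Ep6f) {           // ~ -87.34, illustrative cutoff
    vf = 0.0f;
  }
  return vf;
}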
diff --git a/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc4.c b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc4.c
new file mode 100644
index 000000000..f5a54b839
--- /dev/null
+++ b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32-acc4.c
@@ -0,0 +1,208 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4(
+ size_t batch,
+ const float* input,
+ const float* max,
+ float* output,
+ float* sum,
+ const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m256 vi_max = _mm256_broadcast_ss(max);
+ const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
+ const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
+ const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
+ const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
+ const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
+ const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
+ const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
+ const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
+ const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
+
+ __m256 vacc0 = _mm256_setzero_ps();
+ __m256 vacc1 = _mm256_setzero_ps();
+ __m256 vacc2 = _mm256_setzero_ps();
+ __m256 vacc3 = _mm256_setzero_ps();
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ const __m256 vi0 = _mm256_loadu_ps(input);
+ const __m256 vi1 = _mm256_loadu_ps(input + 8);
+ const __m256 vi2 = _mm256_loadu_ps(input + 16);
+ const __m256 vi3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+ const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
+ const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
+
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+ __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
+ __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
+
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+ vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+ vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
+
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+ vt2 = _mm256_mul_ps(vt2, vs2);
+ vt3 = _mm256_mul_ps(vt3, vs3);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+ __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
+ __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
+
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+ vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
+ vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
+
+ _mm256_storeu_ps(output, vf0);
+ _mm256_storeu_ps(output + 8, vf1);
+ _mm256_storeu_ps(output + 16, vf2);
+ _mm256_storeu_ps(output + 24, vf3);
+ output += 32;
+
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc1 = _mm256_add_ps(vacc1, vf1);
+ vacc2 = _mm256_add_ps(vacc2, vf2);
+ vacc3 = _mm256_add_ps(vacc3, vf3);
+ }
+ vacc0 = _mm256_add_ps(vacc0, vacc1);
+ vacc2 = _mm256_add_ps(vacc2, vacc3);
+ vacc0 = _mm256_add_ps(vacc0, vacc2);
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ const __m256 vi = _mm256_loadu_ps(input);
+ input += 8;
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ _mm256_storeu_ps(output, vf);
+ output += 8;
+
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
+
+ const __m256 vi = _mm256_maskload_ps(input, vmask);
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ __m128 vf_lo = _mm256_castps256_ps128(vf);
+ if (batch & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, vf_lo);
+ vf_lo = _mm256_extractf128_ps(vf, 1);
+ output += 4;
+ }
+ if (batch & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, vf_lo);
+ vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
+ output += 2;
+ }
+ if (batch & (1 * sizeof(float))) {
+ _mm_store_ss(output, vf_lo);
+ }
+
+ vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
+ }
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ _mm_store_ss(sum, vacc_lo);
+ _mm256_zeroupper();
+}
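
The only difference between the plain, _acc2 and _acc4 variants is how many partial sums the main loop keeps: a single accumulator chains every _mm256_add_ps on the previous one, while independent accumulators let those adds overlap and are folded together once after the unrolled loop. A minimal sketch of the idea, with plain sums instead of the exp computation and hypothetical names:

#include <immintrin.h>
#include <stddef.h>

// Illustrates the _acc4 accumulation pattern: four independent partial sums
// break the add dependency chain, then are reduced pairwise after the loop.
static __m256 sum_acc4_sketch(const float* x, size_t n /* multiple of 32 floats */) {
  __m256 acc0 = _mm256_setzero_ps();
  __m256 acc1 = _mm256_setzero_ps();
  __m256 acc2 = _mm256_setzero_ps();
  __m256 acc3 = _mm256_setzero_ps();
  for (size_t i = 0; i < n; i += 32) {
    acc0 = _mm256_add_ps(acc0, _mm256_loadu_ps(x + i));
    acc1 = _mm256_add_ps(acc1, _mm256_loadu_ps(x + i + 8));
    acc2 = _mm256_add_ps(acc2, _mm256_loadu_ps(x + i + 16));
    acc3 = _mm256_add_ps(acc3, _mm256_loadu_ps(x + i + 24));
  }
  acc0 = _mm256_add_ps(acc0, acc1);   // same pairwise fold as the _acc4 kernel
  acc2 = _mm256_add_ps(acc2, acc3);
  return _mm256_add_ps(acc0, acc2);
}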
diff --git a/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32.c b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32.c
new file mode 100644
index 000000000..02ff019b5
--- /dev/null
+++ b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx2-rr1-p5-u32.c
@@ -0,0 +1,202 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32(
+ size_t batch,
+ const float* input,
+ const float* max,
+ float* output,
+ float* sum,
+ const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m256 vi_max = _mm256_broadcast_ss(max);
+ const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
+ const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
+ const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
+ const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
+ const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
+ const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
+ const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
+ const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
+ const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
+
+ __m256 vacc0 = _mm256_setzero_ps();
+ for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
+ const __m256 vi0 = _mm256_loadu_ps(input);
+ const __m256 vi1 = _mm256_loadu_ps(input + 8);
+ const __m256 vi2 = _mm256_loadu_ps(input + 16);
+ const __m256 vi3 = _mm256_loadu_ps(input + 24);
+ input += 32;
+
+ const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
+ const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
+ const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
+ const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
+
+ __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
+ __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
+ __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
+ __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
+
+ const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
+ const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
+ const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
+ const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
+
+ vn0 = _mm256_sub_ps(vn0, vmagic_bias);
+ vn1 = _mm256_sub_ps(vn1, vmagic_bias);
+ vn2 = _mm256_sub_ps(vn2, vmagic_bias);
+ vn3 = _mm256_sub_ps(vn3, vmagic_bias);
+
+ __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
+ __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
+ __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
+ __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
+
+ __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
+ __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
+ __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
+ __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
+
+ vt0 = _mm256_mul_ps(vt0, vs0);
+ vt1 = _mm256_mul_ps(vt1, vs1);
+ vt2 = _mm256_mul_ps(vt2, vs2);
+ vt3 = _mm256_mul_ps(vt3, vs3);
+
+ __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
+ __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
+ __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
+ __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
+
+ vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
+ vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
+ vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
+ vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
+
+ _mm256_storeu_ps(output, vf0);
+ _mm256_storeu_ps(output + 8, vf1);
+ _mm256_storeu_ps(output + 16, vf2);
+ _mm256_storeu_ps(output + 24, vf3);
+ output += 32;
+
+ vacc0 = _mm256_add_ps(vacc0, vf0);
+ vacc0 = _mm256_add_ps(vacc0, vf1);
+ vacc0 = _mm256_add_ps(vacc0, vf2);
+ vacc0 = _mm256_add_ps(vacc0, vf3);
+ }
+
+ __m256 vacc = vacc0;
+ for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
+ const __m256 vi = _mm256_loadu_ps(input);
+ input += 8;
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ _mm256_storeu_ps(output, vf);
+ output += 8;
+
+ vacc = _mm256_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+ assert(batch >= 1 * sizeof(float));
+ assert(batch <= 7 * sizeof(float));
+ const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
+
+ const __m256 vi = _mm256_maskload_ps(input, vmask);
+
+ const __m256 vx = _mm256_sub_ps(vi, vi_max);
+
+ __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
+
+ const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
+
+ vn = _mm256_sub_ps(vn, vmagic_bias);
+
+ __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
+ vp = _mm256_fmadd_ps(vp, vt, vc3);
+ vp = _mm256_fmadd_ps(vp, vt, vc2);
+ vp = _mm256_fmadd_ps(vp, vt, vc1);
+
+ vt = _mm256_mul_ps(vt, vs);
+ __m256 vf = _mm256_fmadd_ps(vt, vp, vs);
+
+ vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
+
+ __m128 vf_lo = _mm256_castps256_ps128(vf);
+ if (batch & (4 * sizeof(float))) {
+ _mm_storeu_ps(output, vf_lo);
+ vf_lo = _mm256_extractf128_ps(vf, 1);
+ output += 4;
+ }
+ if (batch & (2 * sizeof(float))) {
+ _mm_storel_pi((__m64*) output, vf_lo);
+ vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
+ output += 2;
+ }
+ if (batch & (1 * sizeof(float))) {
+ _mm_store_ss(output, vf_lo);
+ }
+
+ vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
+ }
+ __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
+ vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
+ vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
+ _mm_store_ss(sum, vacc_lo);
+ _mm256_zeroupper();
+}
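
Two pieces of the AVX2 epilogue are worth spelling out: the partial-vector tail builds its lane mask by sliding an 8-element window over a table of all-ones entries followed by zeros, and the final sum collapses the 8-lane accumulator in three shuffle/add steps. The sketch below assumes mask_table is laid out as seven -1 entries followed by zeros (the kernels read it from params, so the exact layout is an assumption here); the helper names are hypothetical.

#include <immintrin.h>
#include <stdint.h>
#include <stddef.h>

// Assumed layout of the mask table: 7 all-ones entries followed by zeros.
static const int32_t mask_table[15] = {-1, -1, -1, -1, -1, -1, -1,
                                        0,  0,  0,  0,  0,  0,  0,  0};

// Mask with the first `remaining` lanes set (remaining in 1..7), matching the
// `&mask_table[7] - batch` address computation in the kernels.
static __m256i tail_mask_sketch(size_t remaining) {
  return _mm256_loadu_si256((const __m256i*) &mask_table[7 - remaining]);
}

// Horizontal sum of a __m256 accumulator: 8 -> 4 -> 2 -> 1 lanes, the same
// extractf128 / movehl / movehdup sequence the kernels use before storing *sum.
static float hsum256_sketch(__m256 v) {
  __m128 lo = _mm_add_ps(_mm256_castps256_ps128(v), _mm256_extractf128_ps(v, 1));
  lo = _mm_add_ps(lo, _mm_movehl_ps(lo, lo));
  lo = _mm_add_ss(lo, _mm_movehdup_ps(lo));
  return _mm_cvtss_f32(lo);
}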
diff --git a/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc2.c b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc2.c
new file mode 100644
index 000000000..76d1de49d
--- /dev/null
+++ b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc2.c
@@ -0,0 +1,160 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2(
+ size_t batch,
+ const float* input,
+ const float* max,
+ float* output,
+ float* sum,
+ const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m512 vi_max = _mm512_set1_ps(*max);
+ const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
+ const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
+ const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
+ const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
+ const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
+ const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
+ const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
+ const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
+
+ __m512 vacc0 = _mm512_setzero_ps();
+ __m512 vacc1 = _mm512_setzero_ps();
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ const __m512 vi0 = _mm512_loadu_ps(input);
+ const __m512 vi1 = _mm512_loadu_ps(input + 16);
+ const __m512 vi2 = _mm512_loadu_ps(input + 32);
+ const __m512 vi3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+ const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
+ const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
+ const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
+ const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
+
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+ const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
+ const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
+ const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
+ const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
+
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
+ const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
+ const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
+ const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
+
+ _mm512_storeu_ps(output, vf0);
+ _mm512_storeu_ps(output + 16, vf1);
+ _mm512_storeu_ps(output + 32, vf2);
+ _mm512_storeu_ps(output + 48, vf3);
+ output += 64;
+
+ vacc0 = _mm512_add_ps(vacc0, vf0);
+ vacc1 = _mm512_add_ps(vacc1, vf1);
+ vacc0 = _mm512_add_ps(vacc0, vf2);
+ vacc1 = _mm512_add_ps(vacc1, vf3);
+ }
+ vacc0 = _mm512_add_ps(vacc0, vacc1);
+
+ __m512 vacc = vacc0;
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ const __m512 vi = _mm512_loadu_ps(input);
+ input += 16;
+
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ _mm512_storeu_ps(output, vf);
+ output += 16;
+
+ vacc = _mm512_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+    // Prepare a mask for the remaining valid 32-bit elements (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
+
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ _mm512_mask_storeu_ps(output, vmask, vf);
+
+ vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
+ }
+ *sum = _mm512_reduce_add_ps(vacc);
+}
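
The AVX512F variants get both the exponent reconstruction and the tail handling from the ISA: _mm512_scalef_ps computes p * 2**n directly, so no magic bias or bit shifting is needed, and the remainder is masked with a k-register instead of a mask table. A minimal sketch with hypothetical helper names; the coefficients are passed in by the caller rather than quoted from params.

#include <immintrin.h>
#include <stdint.h>
#include <stddef.h>

// One 16-lane block of the rr1-p5-scalef computation.
static __m512 expminus_scalef_sketch(__m512 vx, __m512 vlog2e, __m512 vminus_ln2,
                                     const __m512 c[6] /* c5..c0 */) {
  const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);  // n = round(x*log2e)
  const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);                 // t = x - n*ln2
  __m512 vp = _mm512_fmadd_ps(c[0], vt, c[1]);                           // Horner: c5*t + c4 ...
  vp = _mm512_fmadd_ps(vp, vt, c[2]);
  vp = _mm512_fmadd_ps(vp, vt, c[3]);
  vp = _mm512_fmadd_ps(vp, vt, c[4]);
  vp = _mm512_fmadd_ps(vp, vt, c[5]);                                    // ... + c0 ~= exp(t)
  return _mm512_scalef_ps(vp, vn);                                       // exp(x) = exp(t) * 2**n
}

// k-mask selecting the first `remaining` lanes (1..15), as built by
// _cvtu32_mask16 in the kernel tails.
static __mmask16 tail_mask16_sketch(size_t remaining) {
  return _cvtu32_mask16((uint32_t) ((UINT32_C(1) << remaining) - UINT32_C(1)));
}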
diff --git a/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc4.c b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc4.c
new file mode 100644
index 000000000..fe3beca33
--- /dev/null
+++ b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64-acc4.c
@@ -0,0 +1,164 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4(
+ size_t batch,
+ const float* input,
+ const float* max,
+ float* output,
+ float* sum,
+ const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m512 vi_max = _mm512_set1_ps(*max);
+ const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
+ const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
+ const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
+ const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
+ const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
+ const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
+ const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
+ const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
+
+ __m512 vacc0 = _mm512_setzero_ps();
+ __m512 vacc1 = _mm512_setzero_ps();
+ __m512 vacc2 = _mm512_setzero_ps();
+ __m512 vacc3 = _mm512_setzero_ps();
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ const __m512 vi0 = _mm512_loadu_ps(input);
+ const __m512 vi1 = _mm512_loadu_ps(input + 16);
+ const __m512 vi2 = _mm512_loadu_ps(input + 32);
+ const __m512 vi3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+ const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
+ const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
+ const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
+ const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
+
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+ const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
+ const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
+ const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
+ const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
+
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
+ const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
+ const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
+ const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
+
+ _mm512_storeu_ps(output, vf0);
+ _mm512_storeu_ps(output + 16, vf1);
+ _mm512_storeu_ps(output + 32, vf2);
+ _mm512_storeu_ps(output + 48, vf3);
+ output += 64;
+
+ vacc0 = _mm512_add_ps(vacc0, vf0);
+ vacc1 = _mm512_add_ps(vacc1, vf1);
+ vacc2 = _mm512_add_ps(vacc2, vf2);
+ vacc3 = _mm512_add_ps(vacc3, vf3);
+ }
+ vacc0 = _mm512_add_ps(vacc0, vacc1);
+ vacc2 = _mm512_add_ps(vacc2, vacc3);
+ vacc0 = _mm512_add_ps(vacc0, vacc2);
+
+ __m512 vacc = vacc0;
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ const __m512 vi = _mm512_loadu_ps(input);
+ input += 16;
+
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ _mm512_storeu_ps(output, vf);
+ output += 16;
+
+ vacc = _mm512_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+    // Prepare a mask for the remaining valid 32-bit elements (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
+
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ _mm512_mask_storeu_ps(output, vmask, vf);
+
+ vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
+ }
+ *sum = _mm512_reduce_add_ps(vacc);
+}
diff --git a/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64.c b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64.c
new file mode 100644
index 000000000..adf550df1
--- /dev/null
+++ b/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-u64.c
@@ -0,0 +1,158 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/raddstoreexpminusmax.h>
+
+
+void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64(
+ size_t batch,
+ const float* input,
+ const float* max,
+ float* output,
+ float* sum,
+ const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(batch != 0);
+ assert(batch % sizeof(float) == 0);
+ assert(input != NULL);
+ assert(max != NULL);
+ assert(output != NULL);
+ assert(sum != NULL);
+
+ const __m512 vi_max = _mm512_set1_ps(*max);
+ const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
+ const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
+ const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
+ const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
+ const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
+ const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
+ const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
+ const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
+
+ __m512 vacc0 = _mm512_setzero_ps();
+ for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
+ const __m512 vi0 = _mm512_loadu_ps(input);
+ const __m512 vi1 = _mm512_loadu_ps(input + 16);
+ const __m512 vi2 = _mm512_loadu_ps(input + 32);
+ const __m512 vi3 = _mm512_loadu_ps(input + 48);
+ input += 64;
+
+ const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
+ const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
+ const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
+ const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
+
+ const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
+ const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
+ const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
+ const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
+
+ const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
+ const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
+ const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
+ const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
+
+ __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
+ __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
+ __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
+ __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
+
+ vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
+ vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
+ vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
+ vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
+
+ const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
+ const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
+ const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
+ const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
+
+ _mm512_storeu_ps(output, vf0);
+ _mm512_storeu_ps(output + 16, vf1);
+ _mm512_storeu_ps(output + 32, vf2);
+ _mm512_storeu_ps(output + 48, vf3);
+ output += 64;
+
+ vacc0 = _mm512_add_ps(vacc0, vf0);
+ vacc0 = _mm512_add_ps(vacc0, vf1);
+ vacc0 = _mm512_add_ps(vacc0, vf2);
+ vacc0 = _mm512_add_ps(vacc0, vf3);
+ }
+
+ __m512 vacc = vacc0;
+ for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
+ const __m512 vi = _mm512_loadu_ps(input);
+ input += 16;
+
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ _mm512_storeu_ps(output, vf);
+ output += 16;
+
+ vacc = _mm512_add_ps(vacc, vf);
+ }
+ if (batch != 0) {
+    // Prepare a mask for the remaining valid 32-bit elements (depends on batch).
+ batch >>= XNN_LOG2_SIZEOF_FLOAT;
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << batch) - UINT32_C(1)));
+
+ const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
+
+ const __m512 vx = _mm512_sub_ps(vi, vi_max);
+
+ const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
+
+ const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
+
+ __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
+ vp = _mm512_fmadd_ps(vp, vt, vc3);
+ vp = _mm512_fmadd_ps(vp, vt, vc2);
+ vp = _mm512_fmadd_ps(vp, vt, vc1);
+ vp = _mm512_fmadd_ps(vp, vt, vc0);
+
+ const __m512 vf = _mm512_scalef_ps(vp, vn);
+
+ _mm512_mask_storeu_ps(output, vmask, vf);
+
+ vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
+ }
+ *sum = _mm512_reduce_add_ps(vacc);
+}
diff --git a/src/xnnpack/raddexpminusmax.h b/src/xnnpack/raddexpminusmax.h
index 441c8db48..b0dfa90e1 100644
--- a/src/xnnpack/raddexpminusmax.h
+++ b/src/xnnpack/raddexpminusmax.h
@@ -21,6 +21,9 @@ extern "C" {
float* sum, \
float max);
+DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32)
+DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2)
+DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc4)
@@ -34,6 +37,9 @@ DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__av
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc3)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc6)
+DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64)
+DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2)
+DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u128)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u128_acc2)
DECLARE_F32_RADDEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u128_acc4)
diff --git a/src/xnnpack/raddstoreexpminusmax.h b/src/xnnpack/raddstoreexpminusmax.h
index 60f07fbfe..fb4a3cb57 100644
--- a/src/xnnpack/raddstoreexpminusmax.h
+++ b/src/xnnpack/raddstoreexpminusmax.h
@@ -24,6 +24,8 @@ extern "C" {
void* sum, \
const union xnn_f16_expminus_params params[XNN_RESTRICT XNN_MIN_ELEMENTS(1)]);
+DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16)
+DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u32)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u32_acc2)
DECLARE_F16_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u32_acc4)
@@ -146,6 +148,9 @@ DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_u
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_u20_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_u20_acc5)
+DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32)
+DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2)
+DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u64)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u64_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u64_acc4)
@@ -159,6 +164,9 @@ DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_u
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u96_acc3)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u96_acc6)
+DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64)
+DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2)
+DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u128)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u128_acc2)
DECLARE_F32_RADDSTOREEXPMINUSMAX_UKERNEL_FUNCTION(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u128_acc4)
diff --git a/test/f16-raddstoreexpminusmax.cc b/test/f16-raddstoreexpminusmax.cc
index b894e6cb6..a4edc782b 100644
--- a/test/f16-raddstoreexpminusmax.cc
+++ b/test/f16-raddstoreexpminusmax.cc
@@ -795,6 +795,80 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16, elements_eq_16) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(16)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16, elements_div_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 32; elements < 160; elements += 16) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+ }
+
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16, elements_lt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 16; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+ }
+
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16, elements_gt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 17; elements < 32; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16_ACC2, elements_eq_16) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(16)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16_ACC2, elements_div_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 32; elements < 160; elements += 16) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+ }
+
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16_ACC2, elements_lt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 16; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+ }
+
+ TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U16_ACC2, elements_gt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 17; elements < 32; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2, xnn_init_f16_expminus_avx2_rr1_p2_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F16_RADDSTOREEXPMINUSMAX__AVX2_RR1_P2_U32, elements_eq_32) {
TEST_REQUIRES_X86_AVX2;
RAddStoreExpMinusMaxMicrokernelTester()
diff --git a/test/f16-raddstoreexpminusmax.yaml b/test/f16-raddstoreexpminusmax.yaml
index fba2191a8..2ea4d6135 100644
--- a/test/f16-raddstoreexpminusmax.yaml
+++ b/test/f16-raddstoreexpminusmax.yaml
@@ -48,6 +48,10 @@
init: xnn_init_f16_expminus_fp16arith_rr2_p2_params
# x86 AVX2
+- name: xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16
+ init: xnn_init_f16_expminus_avx2_rr1_p2_params
+- name: xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u16_acc2
+ init: xnn_init_f16_expminus_avx2_rr1_p2_params
- name: xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u32
init: xnn_init_f16_expminus_avx2_rr1_p2_params
- name: xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_u32_acc2
diff --git a/test/f32-raddexpminusmax.cc b/test/f32-raddexpminusmax.cc
index 15296c397..69ac6ee32 100644
--- a/test/f32-raddexpminusmax.cc
+++ b/test/f32-raddexpminusmax.cc
@@ -18,6 +18,117 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32, elements_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(32)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32);
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32, elements_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 64; elements < 320; elements += 32) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32, elements_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 32; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32, elements_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 33; elements < 64; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC2, elements_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(32)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2);
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC2, elements_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 64; elements < 320; elements += 32) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC2, elements_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 32; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC2, elements_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 33; elements < 64; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC4, elements_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(32)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4);
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC4, elements_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 64; elements < 320; elements += 32) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC4, elements_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 32; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U32_ACC4, elements_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 33; elements < 64; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_RADDEXPMINUSMAX__AVX2_P5_U64, elements_eq_64) {
TEST_REQUIRES_X86_AVX2;
RAddExpMinusMaxMicrokernelTester()
@@ -462,6 +573,117 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64, elements_eq_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(64)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64);
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64, elements_div_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 128; elements < 640; elements += 64) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64, elements_lt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 1; elements < 64; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64, elements_gt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 65; elements < 128; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC2, elements_eq_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(64)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2);
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC2, elements_div_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 128; elements < 640; elements += 64) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC2, elements_lt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 1; elements < 64; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC2, elements_gt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 65; elements < 128; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC4, elements_eq_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(64)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4);
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC4, elements_div_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 128; elements < 640; elements += 64) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC4, elements_lt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 1; elements < 64; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4);
+ }
+ }
+
+ TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U64_ACC4, elements_gt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 65; elements < 128; elements++) {
+ RAddExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_RADDEXPMINUSMAX__AVX512F_P5_SCALEF_U128, elements_eq_128) {
TEST_REQUIRES_X86_AVX512F;
RAddExpMinusMaxMicrokernelTester()
diff --git a/test/f32-raddexpminusmax.yaml b/test/f32-raddexpminusmax.yaml
index f5dbcf227..f4be08d0c 100644
--- a/test/f32-raddexpminusmax.yaml
+++ b/test/f32-raddexpminusmax.yaml
@@ -4,6 +4,9 @@
# LICENSE file in the root directory of this source tree.
# x86 AVX
+- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32
+- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc2
+- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u32_acc4
- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64
- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc2
- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u64_acc4
@@ -16,7 +19,11 @@
- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc2
- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc3
- name: xnn_f32_raddexpminusmax_ukernel__avx2_p5_u96_acc6
+
# x86 AVX512
+- name: xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64
+- name: xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc2
+- name: xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u64_acc4
- name: xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u128
- name: xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u128_acc2
- name: xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_u128_acc4
diff --git a/test/f32-raddstoreexpminusmax.cc b/test/f32-raddstoreexpminusmax.cc
index 19a4a7ad9..ac975f31a 100644
--- a/test/f32-raddstoreexpminusmax.cc
+++ b/test/f32-raddstoreexpminusmax.cc
@@ -2324,6 +2324,117 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32, elements_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(32)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32, elements_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 64; elements < 320; elements += 32) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32, elements_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 32; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32, elements_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 33; elements < 64; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC2, elements_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(32)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC2, elements_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 64; elements < 320; elements += 32) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC2, elements_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 32; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC2, elements_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 33; elements < 64; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC4, elements_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(32)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC4, elements_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 64; elements < 320; elements += 32) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC4, elements_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 1; elements < 32; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U32_ACC4, elements_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t elements = 33; elements < 64; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4, xnn_init_f32_expminus_avx2_rr1_p5_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_RADDSTOREEXPMINUSMAX__AVX2_RR1_P5_U64, elements_eq_64) {
TEST_REQUIRES_X86_AVX2;
RAddStoreExpMinusMaxMicrokernelTester()
@@ -2768,6 +2879,117 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64, elements_eq_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(64)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64, elements_div_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 128; elements < 640; elements += 64) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64, elements_lt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 1; elements < 64; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64, elements_gt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 65; elements < 128; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
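Going by the naming convention, "p5_scalef" in these AVX512F kernels means exp() is reconstructed with a degree-5 polynomial for the reduced argument and VSCALEFPS (_mm512_scalef_ps) to apply the 2^n factor directly, rather than assembling exponent bits by hand as the AVX2 "rr1" variants do. A rough, self-contained sketch of that technique with illustrative coefficients, not the tuned XNNPACK constants (requires AVX512F at compile time):

#include <immintrin.h>

// Illustrative exp() for one 16-float vector: range-reduce by ln(2),
// evaluate a degree-5 polynomial for exp(r), then scale by 2^n in one
// instruction via _mm512_scalef_ps.
static __m512 exp_avx512_scalef_sketch(__m512 x) {
  const __m512 vlog2e = _mm512_set1_ps(1.442695f);   // 1/ln(2)
  const __m512 vln2   = _mm512_set1_ps(0.6931472f);  // ln(2)
  // n = round(x / ln(2)); r = x - n*ln(2)
  const __m512 vn = _mm512_roundscale_ps(
      _mm512_mul_ps(x, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  const __m512 vr = _mm512_fnmadd_ps(vn, vln2, x);
  // Horner evaluation of exp(r) ~= 1 + r + r^2/2 + r^3/6 + r^4/24 + r^5/120
  __m512 vp = _mm512_set1_ps(0.0083333f);
  vp = _mm512_fmadd_ps(vp, vr, _mm512_set1_ps(0.0416667f));
  vp = _mm512_fmadd_ps(vp, vr, _mm512_set1_ps(0.1666667f));
  vp = _mm512_fmadd_ps(vp, vr, _mm512_set1_ps(0.5f));
  vp = _mm512_fmadd_ps(vp, vr, _mm512_set1_ps(1.0f));
  vp = _mm512_fmadd_ps(vp, vr, _mm512_set1_ps(1.0f));
  // exp(x) = p * 2^n, with the scale applied by VSCALEFPS
  return _mm512_scalef_ps(vp, vn);
}
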
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC2, elements_eq_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(64)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC2, elements_div_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 128; elements < 640; elements += 64) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC2, elements_lt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 1; elements < 64; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC2, elements_gt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 65; elements < 128; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC4, elements_eq_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(64)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC4, elements_div_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 128; elements < 640; elements += 64) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC4, elements_lt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 1; elements < 64; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+
+ TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U64_ACC4, elements_gt_64) {
+ TEST_REQUIRES_X86_AVX512F;
+ for (size_t elements = 65; elements < 128; elements++) {
+ RAddStoreExpMinusMaxMicrokernelTester()
+ .elements(elements)
+ .Test(xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4, xnn_init_f32_expminus_avx512_rr1_p5_params);
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
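The _acc2/_acc4 suffixes denote how many partial-sum accumulators the unrolled reduction keeps before combining them; extra accumulators shorten the floating-point add dependency chain at the cost of a slightly different summation order. A scalar analogue of the four-accumulator layout, illustrative only:

#include <cmath>
#include <cstddef>

// Same reduction as the scalar reference above, but with four independent
// partial sums that are only combined at the end -- the scalar analogue of
// an _acc4 kernel.
float sum_exp_minus_max_acc4(size_t n, const float* in, float max_value,
                             float* out) {
  float acc0 = 0.0f, acc1 = 0.0f, acc2 = 0.0f, acc3 = 0.0f;
  size_t i = 0;
  for (; i + 4 <= n; i += 4) {
    const float e0 = std::exp(in[i + 0] - max_value);
    const float e1 = std::exp(in[i + 1] - max_value);
    const float e2 = std::exp(in[i + 2] - max_value);
    const float e3 = std::exp(in[i + 3] - max_value);
    out[i + 0] = e0; out[i + 1] = e1; out[i + 2] = e2; out[i + 3] = e3;
    acc0 += e0; acc1 += e1; acc2 += e2; acc3 += e3;
  }
  for (; i < n; i++) {  // remainder handled by a single accumulator
    const float e = std::exp(in[i] - max_value);
    out[i] = e;
    acc0 += e;
  }
  return (acc0 + acc1) + (acc2 + acc3);
}
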
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_RADDSTOREEXPMINUSMAX__AVX512F_RR1_P5_SCALEF_U128, elements_eq_128) {
TEST_REQUIRES_X86_AVX512F;
RAddStoreExpMinusMaxMicrokernelTester()
diff --git a/test/f32-raddstoreexpminusmax.yaml b/test/f32-raddstoreexpminusmax.yaml
index f4f9cb5d8..5d4e77ee4 100644
--- a/test/f32-raddstoreexpminusmax.yaml
+++ b/test/f32-raddstoreexpminusmax.yaml
@@ -100,11 +100,13 @@
init: xnn_init_f32_expminus_neonfma_rr1_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_u20_acc5
init: xnn_init_f32_expminus_neonfma_rr1_p5_params
+
# RISC-V Vector
- name: xnn_f32_raddstoreexpminusmax_ukernel__rvv_rr2_p6_u2v
init: xnn_init_f32_expminus_rvv_rr2_p6_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__rvv_rr2_p6_u4v
init: xnn_init_f32_expminus_rvv_rr2_p6_params
+
# x86 SSE
- name: xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_u4
init: xnn_init_f32_expminus_sse2_rr2_p5_params
@@ -130,7 +132,14 @@
init: xnn_init_f32_expminus_sse2_rr2_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_u20_acc5
init: xnn_init_f32_expminus_sse2_rr2_p5_params
+
# x86 AVX
+- name: xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32
+ init: xnn_init_f32_expminus_avx2_rr1_p5_params
+- name: xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc2
+ init: xnn_init_f32_expminus_avx2_rr1_p5_params
+- name: xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u32_acc4
+ init: xnn_init_f32_expminus_avx2_rr1_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u64
init: xnn_init_f32_expminus_avx2_rr1_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u64_acc2
@@ -155,7 +164,14 @@
init: xnn_init_f32_expminus_avx2_rr1_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_u96_acc6
init: xnn_init_f32_expminus_avx2_rr1_p5_params
+
# x86 AVX512
+- name: xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64
+ init: xnn_init_f32_expminus_avx512_rr1_p5_params
+- name: xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc2
+ init: xnn_init_f32_expminus_avx512_rr1_p5_params
+- name: xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u64_acc4
+ init: xnn_init_f32_expminus_avx512_rr1_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u128
init: xnn_init_f32_expminus_avx512_rr1_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u128_acc2
@@ -180,6 +196,7 @@
init: xnn_init_f32_expminus_avx512_rr1_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_u192_acc6
init: xnn_init_f32_expminus_avx512_rr1_p5_params
+
# WAsm SIMD
- name: xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_u4
init: xnn_init_f32_expminus_wasmsimd_rr2_p5_params
@@ -205,6 +222,7 @@
init: xnn_init_f32_expminus_wasmsimd_rr2_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_u20_acc5
init: xnn_init_f32_expminus_wasmsimd_rr2_p5_params
+
# WAsm Relaxed SIMD
- name: xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_u4
init: xnn_init_f32_expminus_wasmsimd_rr2_p5_params
@@ -230,6 +248,7 @@
init: xnn_init_f32_expminus_wasmsimd_rr2_p5_params
- name: xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_u20_acc5
init: xnn_init_f32_expminus_wasmsimd_rr2_p5_params
+
# Scalar
- name: xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_u1
init: xnn_init_f32_expminus_scalar_rr2_lut64_p2_params
diff --git a/test/f32-vscaleexpminusmax.yaml b/test/f32-vscaleexpminusmax.yaml
index a2be7f4aa..bfe7ef8cf 100644
--- a/test/f32-vscaleexpminusmax.yaml
+++ b/test/f32-vscaleexpminusmax.yaml
@@ -16,6 +16,7 @@
- name: xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_u80
- name: xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_u88
- name: xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_u96
+
# x86 AVX512
- name: xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_u16
- name: xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_u32