Diffstat (limited to 'libvpx/vpx_dsp/arm/vpx_convolve8_neon.c')
-rw-r--r--  libvpx/vpx_dsp/arm/vpx_convolve8_neon.c  1101
1 file changed, 757 insertions, 344 deletions
diff --git a/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c b/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c
index 06b58c438..b4cdd58c7 100644
--- a/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c
+++ b/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c
@@ -31,8 +31,9 @@
// instructions. This optimization is much faster in the speed unit test, but
// slowed down the whole decoder by 5%.
-#if defined(__aarch64__) && defined(__ARM_FEATURE_DOTPROD) && \
- (__ARM_FEATURE_DOTPROD == 1)
+#if defined(__aarch64__) && \
+ (defined(__ARM_FEATURE_DOTPROD) || defined(__ARM_FEATURE_MATMUL_INT8))
+
DECLARE_ALIGNED(16, static const uint8_t, dot_prod_permute_tbl[48]) = {
0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10,
@@ -53,9 +54,176 @@ DECLARE_ALIGNED(16, static const uint8_t, dot_prod_merge_block_tbl[48]) = {
3, 16, 17, 18, 7, 20, 21, 22, 11, 24, 25, 26, 15, 28, 29, 30
};
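
For reference, convolve8_4_usdot is defined in a shared header (vpx_dsp/arm/vpx_convolve8_neon.h) rather than in this file, so it does not appear in the diff. The sketch below shows the assumed shape of that helper: the first two rows of dot_prod_permute_tbl each gather four overlapping 4-sample windows, so that each USDOT instruction multiplies four u8 samples by four s8 filter taps and accumulates into one s32 lane per output pixel.

  /* Sketch only - assumed shape of the shared helper, not part of this diff. */
  static INLINE int32x4_t convolve8_4_usdot_sketch(uint8x16_t samples,
                                                   int8x8_t filters,
                                                   uint8x16x2_t permute_tbl) {
    /* val[0] gathers windows { 0..3, 1..4, 2..5, 3..6 },
     * val[1] gathers windows { 4..7, 5..8, 6..9, 7..10 }. */
    uint8x16_t perm0 = vqtbl1q_u8(samples, permute_tbl.val[0]);
    uint8x16_t perm1 = vqtbl1q_u8(samples, permute_tbl.val[1]);
    /* Accumulate filter taps 0-3, then taps 4-7. */
    int32x4_t sum = vusdotq_lane_s32(vdupq_n_s32(0), perm0, filters, 0);
    return vusdotq_lane_s32(sum, perm1, filters, 1);
  }
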
-static INLINE void transpose_concat_4x4(int8x8_t *a0, int8x8_t *a1,
- int8x8_t *a2, int8x8_t *a3,
- int8x16_t *b,
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+
+void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *filter, int x0_q4,
+ int x_step_q4, int y0_q4, int y_step_q4, int w,
+ int h) {
+ const int8x8_t filters = vmovn_s16(vld1q_s16(filter[x0_q4]));
+ uint8x16_t s0, s1, s2, s3;
+
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
+ assert(x_step_q4 == 16);
+
+ (void)x_step_q4;
+ (void)y0_q4;
+ (void)y_step_q4;
+
+ src -= 3;
+
+ if (w == 4) {
+ const uint8x16x2_t permute_tbl = vld1q_u8_x2(dot_prod_permute_tbl);
+ do {
+ int32x4_t t0, t1, t2, t3;
+ int16x8_t t01, t23;
+ uint8x8_t d01, d23;
+
+ load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);
+
+ t0 = convolve8_4_usdot(s0, filters, permute_tbl);
+ t1 = convolve8_4_usdot(s1, filters, permute_tbl);
+ t2 = convolve8_4_usdot(s2, filters, permute_tbl);
+ t3 = convolve8_4_usdot(s3, filters, permute_tbl);
+ t01 = vcombine_s16(vqmovn_s32(t0), vqmovn_s32(t1));
+ t23 = vcombine_s16(vqmovn_s32(t2), vqmovn_s32(t3));
+ d01 = vqrshrun_n_s16(t01, 7);
+ d23 = vqrshrun_n_s16(t23, 7);
+
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
+
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ const uint8_t *s;
+ uint8_t *d;
+ int width;
+ uint8x8_t d0, d1, d2, d3;
+
+ do {
+ width = w;
+ s = src;
+ d = dst;
+ do {
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ d0 = convolve8_8_usdot(s0, filters, permute_tbl);
+ d1 = convolve8_8_usdot(s1, filters, permute_tbl);
+ d2 = convolve8_8_usdot(s2, filters, permute_tbl);
+ d3 = convolve8_8_usdot(s3, filters, permute_tbl);
+
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width > 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ h -= 4;
+ } while (h > 0);
+ }
+}
+
+void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *filter, int x0_q4,
+ int x_step_q4, int y0_q4, int y_step_q4,
+ int w, int h) {
+ const int8x8_t filters = vmovn_s16(vld1q_s16(filter[x0_q4]));
+ uint8x16_t s0, s1, s2, s3;
+
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
+ assert(x_step_q4 == 16);
+
+ (void)x_step_q4;
+ (void)y0_q4;
+ (void)y_step_q4;
+
+ src -= 3;
+
+ if (w == 4) {
+ const uint8x16x2_t permute_tbl = vld1q_u8_x2(dot_prod_permute_tbl);
+ do {
+ int32x4_t t0, t1, t2, t3;
+ int16x8_t t01, t23;
+ uint8x8_t d01, d23, dd01, dd23;
+ dd01 = vdup_n_u8(0);
+ dd23 = vdup_n_u8(0);
+
+ load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);
+
+ t0 = convolve8_4_usdot(s0, filters, permute_tbl);
+ t1 = convolve8_4_usdot(s1, filters, permute_tbl);
+ t2 = convolve8_4_usdot(s2, filters, permute_tbl);
+ t3 = convolve8_4_usdot(s3, filters, permute_tbl);
+ t01 = vcombine_s16(vqmovn_s32(t0), vqmovn_s32(t1));
+ t23 = vcombine_s16(vqmovn_s32(t2), vqmovn_s32(t3));
+ d01 = vqrshrun_n_s16(t01, 7);
+ d23 = vqrshrun_n_s16(t23, 7);
+
+ dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
+ dd23 = load_u8(dst + 2 * dst_stride, dst_stride);
+
+ d01 = vrhadd_u8(d01, dd01);
+ d23 = vrhadd_u8(d23, dd23);
+
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
+
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
+ const uint8_t *s;
+ uint8_t *d;
+ int width;
+ uint8x8_t d0, d1, d2, d3, dd0, dd1, dd2, dd3;
+
+ do {
+ width = w;
+ s = src;
+ d = dst;
+ do {
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ d0 = convolve8_8_usdot(s0, filters, permute_tbl);
+ d1 = convolve8_8_usdot(s1, filters, permute_tbl);
+ d2 = convolve8_8_usdot(s2, filters, permute_tbl);
+ d3 = convolve8_8_usdot(s3, filters, permute_tbl);
+
+ load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ d0 = vrhadd_u8(d0, dd0);
+ d1 = vrhadd_u8(d1, dd1);
+ d2 = vrhadd_u8(d2, dd2);
+ d3 = vrhadd_u8(d3, dd3);
+
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ s += 8;
+ d += 8;
+ width -= 8;
+ } while (width > 0);
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ h -= 4;
+ } while (h > 0);
+ }
+}
+
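
Both _avg_ kernels blend the convolution result into the existing destination with vrhadd_u8, a rounding halving add. Lane-wise it computes the round-to-nearest average of each byte pair, i.e. (for illustration):

  /* Scalar model of one vrhadd_u8 lane. */
  static uint8_t rhadd_u8(uint8_t a, uint8_t b) {
    return (uint8_t)(((unsigned)a + (unsigned)b + 1) >> 1);
  }
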
+static INLINE void transpose_concat_4x4(uint8x8_t a0, uint8x8_t a1,
+ uint8x8_t a2, uint8x8_t a3,
+ uint8x16_t *b,
const uint8x16_t permute_tbl) {
/* Transpose 8-bit elements and concatenate result rows as follows:
* a0: 00, 01, 02, 03, XX, XX, XX, XX
@@ -70,13 +238,13 @@ static INLINE void transpose_concat_4x4(int8x8_t *a0, int8x8_t *a1,
* inline helper is called many times from the same parent function.
*/
- int8x16x2_t samples = { { vcombine_s8(*a0, *a1), vcombine_s8(*a2, *a3) } };
- *b = vqtbl2q_s8(samples, permute_tbl);
+ uint8x16x2_t samples = { { vcombine_u8(a0, a1), vcombine_u8(a2, a3) } };
+ *b = vqtbl2q_u8(samples, permute_tbl);
}
-static INLINE void transpose_concat_8x4(int8x8_t *a0, int8x8_t *a1,
- int8x8_t *a2, int8x8_t *a3,
- int8x16_t *b0, int8x16_t *b1,
+static INLINE void transpose_concat_8x4(uint8x8_t a0, uint8x8_t a1,
+ uint8x8_t a2, uint8x8_t a3,
+ uint8x16_t *b0, uint8x16_t *b1,
const uint8x16x2_t permute_tbl) {
/* Transpose 8-bit elements and concatenate result rows as follows:
* a0: 00, 01, 02, 03, 04, 05, 06, 07
@@ -92,11 +260,364 @@ static INLINE void transpose_concat_8x4(int8x8_t *a0, int8x8_t *a1,
* inline helper is called many times from the same parent function.
*/
- int8x16x2_t samples = { { vcombine_s8(*a0, *a1), vcombine_s8(*a2, *a3) } };
- *b0 = vqtbl2q_s8(samples, permute_tbl.val[0]);
- *b1 = vqtbl2q_s8(samples, permute_tbl.val[1]);
+ uint8x16x2_t samples = { { vcombine_u8(a0, a1), vcombine_u8(a2, a3) } };
+ *b0 = vqtbl2q_u8(samples, permute_tbl.val[0]);
+ *b1 = vqtbl2q_u8(samples, permute_tbl.val[1]);
+}
+
+void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *filter, int x0_q4,
+ int x_step_q4, int y0_q4, int y_step_q4, int w,
+ int h) {
+ const int8x8_t filters = vmovn_s16(vld1q_s16(filter[y0_q4]));
+ const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);
+ uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
+ uint8x16x2_t samples_LUT;
+
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
+ assert(y_step_q4 == 16);
+
+ (void)x0_q4;
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ src -= 3 * src_stride;
+
+ if (w == 4) {
+ const uint8x16_t tran_concat_tbl = vld1q_u8(dot_prod_tran_concat_tbl);
+ uint8x16_t s0123, s1234, s2345, s3456, s4567, s5678, s6789, s78910;
+ int32x4_t d0, d1, d2, d3;
+ uint8x8_t d01, d23;
+
+ load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ src += 7 * src_stride;
+
+ s7 = vdup_n_u8(0);
+ s8 = vdup_n_u8(0);
+ s9 = vdup_n_u8(0);
+
+ /* This operation combines a conventional transpose and the sample permute
+ * (see horizontal case) required before computing the dot product.
+ */
+ transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
+ transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
+ transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
+ transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
+ transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
+ transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
+ transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);
+
+ do {
+ load_u8_8x4(src, src_stride, &s7, &s8, &s9, &s10);
+
+ transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);
+
+ /* Merge new data into block from previous iteration. */
+ samples_LUT.val[0] = s3456;
+ samples_LUT.val[1] = s78910;
+ s4567 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
+ s5678 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
+ s6789 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
+
+ d0 = convolve8_4_usdot_partial(s0123, s4567, filters);
+ d1 = convolve8_4_usdot_partial(s1234, s5678, filters);
+ d2 = convolve8_4_usdot_partial(s2345, s6789, filters);
+ d3 = convolve8_4_usdot_partial(s3456, s78910, filters);
+ d01 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d0), vqmovn_s32(d1)), 7);
+ d23 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d2), vqmovn_s32(d3)), 7);
+
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
+
+ /* Prepare block for next iteration - re-using as much as possible. */
+ /* Shuffle everything up four rows. */
+ s0123 = s4567;
+ s1234 = s5678;
+ s2345 = s6789;
+ s3456 = s78910;
+
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ const uint8x16x2_t tran_concat_tbl = vld1q_u8_x2(dot_prod_tran_concat_tbl);
+ uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
+ s3456_lo, s3456_hi, s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo,
+ s6789_hi, s78910_lo, s78910_hi;
+ uint8x8_t d0, d1, d2, d3;
+ const uint8_t *s;
+ uint8_t *d;
+ int height;
+
+ do {
+ height = h;
+ s = src;
+ d = dst;
+
+ load_u8_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ s7 = vdup_n_u8(0);
+ s8 = vdup_n_u8(0);
+ s9 = vdup_n_u8(0);
+
+ /* This operation combines a conventional transpose and the sample permute
+ * (see horizontal case) required before computing the dot product.
+ */
+ transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
+ tran_concat_tbl);
+
+ do {
+ load_u8_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
+ tran_concat_tbl);
+
+ /* Merge new data into block from previous iteration. */
+ samples_LUT.val[0] = s3456_lo;
+ samples_LUT.val[1] = s78910_lo;
+ s4567_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
+ s5678_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
+ s6789_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
+
+ samples_LUT.val[0] = s3456_hi;
+ samples_LUT.val[1] = s78910_hi;
+ s4567_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
+ s5678_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
+ s6789_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
+
+ d0 = convolve8_8_usdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
+ filters);
+ d1 = convolve8_8_usdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
+ filters);
+ d2 = convolve8_8_usdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
+ filters);
+ d3 = convolve8_8_usdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
+ filters);
+
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ /* Prepare block for next iteration - re-using as much as possible. */
+ /* Shuffle everything up four rows. */
+ s0123_lo = s4567_lo;
+ s0123_hi = s4567_hi;
+ s1234_lo = s5678_lo;
+ s1234_hi = s5678_hi;
+ s2345_lo = s6789_lo;
+ s2345_hi = s6789_hi;
+ s3456_lo = s78910_lo;
+ s3456_hi = s78910_hi;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ } while (w > 0);
+ }
}
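
The merge step above is where the vertical kernel saves most of its work: instead of re-transposing rows 4-9 on every iteration, the previous iteration's s3456 block and the freshly transposed s78910 block are combined with vqtbl2q_u8, whose index rows (dot_prod_merge_block_tbl) shift each transposed 4-byte column up by one, two or three samples to synthesize s4567, s5678 and s6789. A scalar model of the two-register table lookup used here:

  /* Scalar model of vqtbl2q_u8: indices 0-15 select from val[0] (the old
   * block), indices 16-31 from val[1] (the new block). All indices used by
   * dot_prod_merge_block_tbl are in range, so TBL's zero-fill behavior for
   * out-of-range indices never triggers. */
  static void tbl2_model(const uint8_t val0[16], const uint8_t val1[16],
                         const uint8_t idx[16], uint8_t out[16]) {
    int i;
    for (i = 0; i < 16; i++) {
      out[i] = (idx[i] < 16) ? val0[idx[i]] : val1[idx[i] - 16];
    }
  }
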
+void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *filter, int x0_q4,
+ int x_step_q4, int y0_q4, int y_step_q4, int w,
+ int h) {
+ const int8x8_t filters = vmovn_s16(vld1q_s16(filter[y0_q4]));
+ const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);
+ uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
+ uint8x16x2_t samples_LUT;
+
+ assert(!((intptr_t)dst & 3));
+ assert(!(dst_stride & 3));
+ assert(y_step_q4 == 16);
+
+ (void)x0_q4;
+ (void)x_step_q4;
+ (void)y_step_q4;
+
+ src -= 3 * src_stride;
+
+ if (w == 4) {
+ const uint8x16_t tran_concat_tbl = vld1q_u8(dot_prod_tran_concat_tbl);
+ uint8x16_t s0123, s1234, s2345, s3456, s4567, s5678, s6789, s78910;
+ int32x4_t d0, d1, d2, d3;
+ uint8x8_t d01, d23, dd01, dd23;
+
+ load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ src += 7 * src_stride;
+
+ s7 = vdup_n_u8(0);
+ s8 = vdup_n_u8(0);
+ s9 = vdup_n_u8(0);
+
+ /* This operation combines a conventional transpose and the sample permute
+ * (see horizontal case) required before computing the dot product.
+ */
+ transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
+ transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
+ transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
+ transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
+ transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
+ transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
+ transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);
+
+ do {
+ load_u8_8x4(src, src_stride, &s7, &s8, &s9, &s10);
+
+ transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);
+
+ /* Merge new data into block from previous iteration. */
+ samples_LUT.val[0] = s3456;
+ samples_LUT.val[1] = s78910;
+ s4567 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
+ s5678 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
+ s6789 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
+
+ d0 = convolve8_4_usdot_partial(s0123, s4567, filters);
+ d1 = convolve8_4_usdot_partial(s1234, s5678, filters);
+ d2 = convolve8_4_usdot_partial(s2345, s6789, filters);
+ d3 = convolve8_4_usdot_partial(s3456, s78910, filters);
+ d01 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d0), vqmovn_s32(d1)), 7);
+ d23 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d2), vqmovn_s32(d3)), 7);
+
+ dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
+ dd23 = load_u8(dst + 2 * dst_stride, dst_stride);
+
+ d01 = vrhadd_u8(d01, dd01);
+ d23 = vrhadd_u8(d23, dd23);
+
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
+
+ /* Prepare block for next iteration - re-using as much as possible. */
+ /* Shuffle everything up four rows. */
+ s0123 = s4567;
+ s1234 = s5678;
+ s2345 = s6789;
+ s3456 = s78910;
+
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
+ h -= 4;
+ } while (h > 0);
+ } else {
+ const uint8x16x2_t tran_concat_tbl = vld1q_u8_x2(dot_prod_tran_concat_tbl);
+ uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
+ s3456_lo, s3456_hi, s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo,
+ s6789_hi, s78910_lo, s78910_hi;
+ uint8x8_t d0, d1, d2, d3, dd0, dd1, dd2, dd3;
+ const uint8_t *s;
+ uint8_t *d;
+ int height;
+
+ do {
+ height = h;
+ s = src;
+ d = dst;
+
+ load_u8_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+ s += 7 * src_stride;
+
+ s7 = vdup_n_u8(0);
+ s8 = vdup_n_u8(0);
+ s9 = vdup_n_u8(0);
+
+ /* This operation combines a conventional transpose and the sample permute
+ * (see horizontal case) required before computing the dot product.
+ */
+ transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
+ tran_concat_tbl);
+ transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
+ tran_concat_tbl);
+
+ do {
+ load_u8_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+ transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
+ tran_concat_tbl);
+
+ /* Merge new data into block from previous iteration. */
+ samples_LUT.val[0] = s3456_lo;
+ samples_LUT.val[1] = s78910_lo;
+ s4567_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
+ s5678_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
+ s6789_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
+
+ samples_LUT.val[0] = s3456_hi;
+ samples_LUT.val[1] = s78910_hi;
+ s4567_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
+ s5678_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
+ s6789_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);
+
+ d0 = convolve8_8_usdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
+ filters);
+ d1 = convolve8_8_usdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
+ filters);
+ d2 = convolve8_8_usdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
+ filters);
+ d3 = convolve8_8_usdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
+ filters);
+
+ load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
+ d0 = vrhadd_u8(d0, dd0);
+ d1 = vrhadd_u8(d1, dd1);
+ d2 = vrhadd_u8(d2, dd2);
+ d3 = vrhadd_u8(d3, dd3);
+
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
+
+ /* Prepare block for next iteration - re-using as much as possible. */
+ /* Shuffle everything up four rows. */
+ s0123_lo = s4567_lo;
+ s0123_hi = s4567_hi;
+ s1234_lo = s5678_lo;
+ s1234_hi = s5678_hi;
+ s2345_lo = s6789_lo;
+ s2345_hi = s6789_hi;
+ s3456_lo = s78910_lo;
+ s3456_hi = s78910_hi;
+
+ s += 4 * src_stride;
+ d += 4 * dst_stride;
+ height -= 4;
+ } while (height > 0);
+ src += 8;
+ dst += 8;
+ w -= 8;
+ } while (w > 0);
+ }
+}
+
+#else // !defined(__ARM_FEATURE_MATMUL_INT8)
+
void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const InterpKernel *filter, int x0_q4,
@@ -125,33 +646,22 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
int16x8_t t01, t23;
uint8x8_t d01, d23;
- s0 = vld1q_u8(src);
- src += src_stride;
- s1 = vld1q_u8(src);
- src += src_stride;
- s2 = vld1q_u8(src);
- src += src_stride;
- s3 = vld1q_u8(src);
- src += src_stride;
-
- t0 = convolve8_4_dot(s0, filters, correction, range_limit, permute_tbl);
- t1 = convolve8_4_dot(s1, filters, correction, range_limit, permute_tbl);
- t2 = convolve8_4_dot(s2, filters, correction, range_limit, permute_tbl);
- t3 = convolve8_4_dot(s3, filters, correction, range_limit, permute_tbl);
+ load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);
+ t0 = convolve8_4_sdot(s0, filters, correction, range_limit, permute_tbl);
+ t1 = convolve8_4_sdot(s1, filters, correction, range_limit, permute_tbl);
+ t2 = convolve8_4_sdot(s2, filters, correction, range_limit, permute_tbl);
+ t3 = convolve8_4_sdot(s3, filters, correction, range_limit, permute_tbl);
t01 = vcombine_s16(vqmovn_s32(t0), vqmovn_s32(t1));
t23 = vcombine_s16(vqmovn_s32(t2), vqmovn_s32(t3));
d01 = vqrshrun_n_s16(t01, 7);
d23 = vqrshrun_n_s16(t23, 7);
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 1);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 1);
- dst += dst_stride;
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
+
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
h -= 4;
} while (h > 0);
} else {
@@ -166,20 +676,18 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
s = src;
d = dst;
do {
- s0 = vld1q_u8(s + 0 * src_stride);
- s1 = vld1q_u8(s + 1 * src_stride);
- s2 = vld1q_u8(s + 2 * src_stride);
- s3 = vld1q_u8(s + 3 * src_stride);
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
- d0 = convolve8_8_dot(s0, filters, correction, range_limit, permute_tbl);
- d1 = convolve8_8_dot(s1, filters, correction, range_limit, permute_tbl);
- d2 = convolve8_8_dot(s2, filters, correction, range_limit, permute_tbl);
- d3 = convolve8_8_dot(s3, filters, correction, range_limit, permute_tbl);
+ d0 =
+ convolve8_8_sdot(s0, filters, correction, range_limit, permute_tbl);
+ d1 =
+ convolve8_8_sdot(s1, filters, correction, range_limit, permute_tbl);
+ d2 =
+ convolve8_8_sdot(s2, filters, correction, range_limit, permute_tbl);
+ d3 =
+ convolve8_8_sdot(s3, filters, correction, range_limit, permute_tbl);
- vst1_u8(d + 0 * dst_stride, d0);
- vst1_u8(d + 1 * dst_stride, d1);
- vst1_u8(d + 2 * dst_stride, d2);
- vst1_u8(d + 3 * dst_stride, d3);
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
s += 8;
d += 8;
@@ -222,20 +730,12 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
dd01 = vdup_n_u8(0);
dd23 = vdup_n_u8(0);
- s0 = vld1q_u8(src);
- src += src_stride;
- s1 = vld1q_u8(src);
- src += src_stride;
- s2 = vld1q_u8(src);
- src += src_stride;
- s3 = vld1q_u8(src);
- src += src_stride;
-
- t0 = convolve8_4_dot(s0, filters, correction, range_limit, permute_tbl);
- t1 = convolve8_4_dot(s1, filters, correction, range_limit, permute_tbl);
- t2 = convolve8_4_dot(s2, filters, correction, range_limit, permute_tbl);
- t3 = convolve8_4_dot(s3, filters, correction, range_limit, permute_tbl);
+ load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);
+ t0 = convolve8_4_sdot(s0, filters, correction, range_limit, permute_tbl);
+ t1 = convolve8_4_sdot(s1, filters, correction, range_limit, permute_tbl);
+ t2 = convolve8_4_sdot(s2, filters, correction, range_limit, permute_tbl);
+ t3 = convolve8_4_sdot(s3, filters, correction, range_limit, permute_tbl);
t01 = vcombine_s16(vqmovn_s32(t0), vqmovn_s32(t1));
t23 = vcombine_s16(vqmovn_s32(t2), vqmovn_s32(t3));
d01 = vqrshrun_n_s16(t01, 7);
@@ -243,17 +743,15 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
dd23 = load_u8(dst + 2 * dst_stride, dst_stride);
+
d01 = vrhadd_u8(d01, dd01);
d23 = vrhadd_u8(d23, dd23);
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 1);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 1);
- dst += dst_stride;
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
+
+ src += 4 * src_stride;
+ dst += 4 * dst_stride;
h -= 4;
} while (h > 0);
} else {
@@ -268,29 +766,25 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
s = src;
d = dst;
do {
- s0 = vld1q_u8(s + 0 * src_stride);
- s1 = vld1q_u8(s + 1 * src_stride);
- s2 = vld1q_u8(s + 2 * src_stride);
- s3 = vld1q_u8(s + 3 * src_stride);
-
- d0 = convolve8_8_dot(s0, filters, correction, range_limit, permute_tbl);
- d1 = convolve8_8_dot(s1, filters, correction, range_limit, permute_tbl);
- d2 = convolve8_8_dot(s2, filters, correction, range_limit, permute_tbl);
- d3 = convolve8_8_dot(s3, filters, correction, range_limit, permute_tbl);
-
- dd0 = vld1_u8(d + 0 * dst_stride);
- dd1 = vld1_u8(d + 1 * dst_stride);
- dd2 = vld1_u8(d + 2 * dst_stride);
- dd3 = vld1_u8(d + 3 * dst_stride);
+ load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);
+
+ d0 =
+ convolve8_8_sdot(s0, filters, correction, range_limit, permute_tbl);
+ d1 =
+ convolve8_8_sdot(s1, filters, correction, range_limit, permute_tbl);
+ d2 =
+ convolve8_8_sdot(s2, filters, correction, range_limit, permute_tbl);
+ d3 =
+ convolve8_8_sdot(s3, filters, correction, range_limit, permute_tbl);
+
+ load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
d0 = vrhadd_u8(d0, dd0);
d1 = vrhadd_u8(d1, dd1);
d2 = vrhadd_u8(d2, dd2);
d3 = vrhadd_u8(d3, dd3);
- vst1_u8(d + 0 * dst_stride, d0);
- vst1_u8(d + 1 * dst_stride, d1);
- vst1_u8(d + 2 * dst_stride, d2);
- vst1_u8(d + 3 * dst_stride, d3);
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
s += 8;
d += 8;
@@ -303,6 +797,49 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
}
}
+static INLINE void transpose_concat_4x4(int8x8_t a0, int8x8_t a1, int8x8_t a2,
+ int8x8_t a3, int8x16_t *b,
+ const uint8x16_t permute_tbl) {
+ /* Transpose 8-bit elements and concatenate result rows as follows:
+ * a0: 00, 01, 02, 03, XX, XX, XX, XX
+ * a1: 10, 11, 12, 13, XX, XX, XX, XX
+ * a2: 20, 21, 22, 23, XX, XX, XX, XX
+ * a3: 30, 31, 32, 33, XX, XX, XX, XX
+ *
+ * b: 00, 10, 20, 30, 01, 11, 21, 31, 02, 12, 22, 32, 03, 13, 23, 33
+ *
+ * The 'permute_tbl' is always 'dot_prod_tran_concat_tbl' above. Passing it
+ * as an argument is preferable to loading it directly from memory as this
+ * inline helper is called many times from the same parent function.
+ */
+
+ int8x16x2_t samples = { { vcombine_s8(a0, a1), vcombine_s8(a2, a3) } };
+ *b = vqtbl2q_s8(samples, permute_tbl);
+}
+
+static INLINE void transpose_concat_8x4(int8x8_t a0, int8x8_t a1, int8x8_t a2,
+ int8x8_t a3, int8x16_t *b0,
+ int8x16_t *b1,
+ const uint8x16x2_t permute_tbl) {
+ /* Transpose 8-bit elements and concatenate result rows as follows:
+ * a0: 00, 01, 02, 03, 04, 05, 06, 07
+ * a1: 10, 11, 12, 13, 14, 15, 16, 17
+ * a2: 20, 21, 22, 23, 24, 25, 26, 27
+ * a3: 30, 31, 32, 33, 34, 35, 36, 37
+ *
+ * b0: 00, 10, 20, 30, 01, 11, 21, 31, 02, 12, 22, 32, 03, 13, 23, 33
+ * b1: 04, 14, 24, 34, 05, 15, 25, 35, 06, 16, 26, 36, 07, 17, 27, 37
+ *
+ * The 'permute_tbl' is always 'dot_prod_tran_concat_tbl' above. Passing it
+ * as an argument is preferable to loading it directly from memory as this
+ * inline helper is called many times from the same parent function.
+ */
+
+ int8x16x2_t samples = { { vcombine_s8(a0, a1), vcombine_s8(a2, a3) } };
+ *b0 = vqtbl2q_s8(samples, permute_tbl.val[0]);
+ *b1 = vqtbl2q_s8(samples, permute_tbl.val[1]);
+}
+
void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const InterpKernel *filter, int x0_q4,
@@ -333,14 +870,8 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
int32x4_t d0, d1, d2, d3;
uint8x8_t d01, d23;
- load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
- src += 4 * src_stride;
- t4 = vld1_u8(src);
- src += src_stride;
- t5 = vld1_u8(src);
- src += src_stride;
- t6 = vld1_u8(src);
- src += src_stride;
+ load_u8_8x7(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
+ src += 7 * src_stride;
/* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
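
This clamp, together with the 'correction' term passed to the sdot helpers, is the standard trick for running unsigned pixels through the signed-only SDOT instruction: since sum_k f[k]*x[k] = sum_k f[k]*(x[k] - 128) + 128*sum_k f[k], subtracting range_limit (128) keeps samples in s8 range, and adding a precomputed correction of 128 * sum(filters) restores the true accumulator. A sketch of how the two constants could be set up (the real definitions sit in unchanged parts of these functions and are elided from the diff):

  /* Illustrative setup only - names and placement are assumptions. */
  const int16x8_t f = vld1q_s16(filter[y0_q4]);
  const int32x4_t correction = vdupq_n_s32(128 * vaddlvq_s16(f));
  const uint8x8_t range_limit = vdup_n_u8(128);
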
@@ -357,13 +888,13 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
- transpose_concat_4x4(&s0, &s1, &s2, &s3, &s0123, tran_concat_tbl);
- transpose_concat_4x4(&s1, &s2, &s3, &s4, &s1234, tran_concat_tbl);
- transpose_concat_4x4(&s2, &s3, &s4, &s5, &s2345, tran_concat_tbl);
- transpose_concat_4x4(&s3, &s4, &s5, &s6, &s3456, tran_concat_tbl);
- transpose_concat_4x4(&s4, &s5, &s6, &s7, &s4567, tran_concat_tbl);
- transpose_concat_4x4(&s5, &s6, &s7, &s8, &s5678, tran_concat_tbl);
- transpose_concat_4x4(&s6, &s7, &s8, &s9, &s6789, tran_concat_tbl);
+ transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
+ transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
+ transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
+ transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
+ transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
+ transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
+ transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);
do {
uint8x8_t t7, t8, t9, t10;
@@ -375,7 +906,7 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
- transpose_concat_4x4(&s7, &s8, &s9, &s10, &s78910, tran_concat_tbl);
+ transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);
/* Merge new data into block from previous iteration. */
samples_LUT.val[0] = s3456;
@@ -384,22 +915,15 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s5678 = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[1]);
s6789 = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_4_dot_partial(s0123, s4567, correction, filters);
- d1 = convolve8_4_dot_partial(s1234, s5678, correction, filters);
- d2 = convolve8_4_dot_partial(s2345, s6789, correction, filters);
- d3 = convolve8_4_dot_partial(s3456, s78910, correction, filters);
-
+ d0 = convolve8_4_sdot_partial(s0123, s4567, correction, filters);
+ d1 = convolve8_4_sdot_partial(s1234, s5678, correction, filters);
+ d2 = convolve8_4_sdot_partial(s2345, s6789, correction, filters);
+ d3 = convolve8_4_sdot_partial(s3456, s78910, correction, filters);
d01 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d0), vqmovn_s32(d1)), 7);
d23 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d2), vqmovn_s32(d3)), 7);
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 1);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 1);
- dst += dst_stride;
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
/* Prepare block for next iteration - re-using as much as possible. */
/* Shuffle everything up four rows. */
@@ -409,6 +933,7 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s3456 = s78910;
src += 4 * src_stride;
+ dst += 4 * dst_stride;
h -= 4;
} while (h > 0);
} else {
@@ -426,14 +951,8 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s = src;
d = dst;
- load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
- s += 4 * src_stride;
- t4 = vld1_u8(s);
- s += src_stride;
- t5 = vld1_u8(s);
- s += src_stride;
- t6 = vld1_u8(s);
- s += src_stride;
+ load_u8_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
+ s += 7 * src_stride;
/* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
@@ -450,19 +969,19 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
- transpose_concat_8x4(&s0, &s1, &s2, &s3, &s0123_lo, &s0123_hi,
+ transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s1, &s2, &s3, &s4, &s1234_lo, &s1234_hi,
+ transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s2, &s3, &s4, &s5, &s2345_lo, &s2345_hi,
+ transpose_concat_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s3, &s4, &s5, &s6, &s3456_lo, &s3456_hi,
+ transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s4, &s5, &s6, &s7, &s4567_lo, &s4567_hi,
+ transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s5, &s6, &s7, &s8, &s5678_lo, &s5678_hi,
+ transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s6, &s7, &s8, &s9, &s6789_lo, &s6789_hi,
+ transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
tran_concat_tbl);
do {
@@ -475,7 +994,7 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
- transpose_concat_8x4(&s7, &s8, &s9, &s10, &s78910_lo, &s78910_hi,
+ transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
tran_concat_tbl);
/* Merge new data into block from previous iteration. */
@@ -491,18 +1010,16 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s5678_hi = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[1]);
s6789_hi = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_8_dot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
- correction, filters);
- d1 = convolve8_8_dot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
- correction, filters);
- d2 = convolve8_8_dot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
- correction, filters);
- d3 = convolve8_8_dot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
- correction, filters);
- vst1_u8(d + 0 * dst_stride, d0);
- vst1_u8(d + 1 * dst_stride, d1);
- vst1_u8(d + 2 * dst_stride, d2);
- vst1_u8(d + 3 * dst_stride, d3);
+ d0 = convolve8_8_sdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
+ correction, filters);
+ d1 = convolve8_8_sdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
+ correction, filters);
+ d2 = convolve8_8_sdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
+ correction, filters);
+ d3 = convolve8_8_sdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
+ correction, filters);
+
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
/* Prepare block for next iteration - re-using as much as possible. */
/* Shuffle everything up four rows. */
@@ -556,14 +1073,8 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
int32x4_t d0, d1, d2, d3;
uint8x8_t d01, d23, dd01, dd23;
- load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
- src += 4 * src_stride;
- t4 = vld1_u8(src);
- src += src_stride;
- t5 = vld1_u8(src);
- src += src_stride;
- t6 = vld1_u8(src);
- src += src_stride;
+ load_u8_8x7(src, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
+ src += 7 * src_stride;
/* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
@@ -580,13 +1091,13 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
- transpose_concat_4x4(&s0, &s1, &s2, &s3, &s0123, tran_concat_tbl);
- transpose_concat_4x4(&s1, &s2, &s3, &s4, &s1234, tran_concat_tbl);
- transpose_concat_4x4(&s2, &s3, &s4, &s5, &s2345, tran_concat_tbl);
- transpose_concat_4x4(&s3, &s4, &s5, &s6, &s3456, tran_concat_tbl);
- transpose_concat_4x4(&s4, &s5, &s6, &s7, &s4567, tran_concat_tbl);
- transpose_concat_4x4(&s5, &s6, &s7, &s8, &s5678, tran_concat_tbl);
- transpose_concat_4x4(&s6, &s7, &s8, &s9, &s6789, tran_concat_tbl);
+ transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
+ transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
+ transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
+ transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
+ transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
+ transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
+ transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);
do {
uint8x8_t t7, t8, t9, t10;
@@ -598,7 +1109,7 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
- transpose_concat_4x4(&s7, &s8, &s9, &s10, &s78910, tran_concat_tbl);
+ transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);
/* Merge new data into block from previous iteration. */
samples_LUT.val[0] = s3456;
@@ -607,27 +1118,21 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s5678 = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[1]);
s6789 = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_4_dot_partial(s0123, s4567, correction, filters);
- d1 = convolve8_4_dot_partial(s1234, s5678, correction, filters);
- d2 = convolve8_4_dot_partial(s2345, s6789, correction, filters);
- d3 = convolve8_4_dot_partial(s3456, s78910, correction, filters);
-
+ d0 = convolve8_4_sdot_partial(s0123, s4567, correction, filters);
+ d1 = convolve8_4_sdot_partial(s1234, s5678, correction, filters);
+ d2 = convolve8_4_sdot_partial(s2345, s6789, correction, filters);
+ d3 = convolve8_4_sdot_partial(s3456, s78910, correction, filters);
d01 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d0), vqmovn_s32(d1)), 7);
d23 = vqrshrun_n_s16(vcombine_s16(vqmovn_s32(d2), vqmovn_s32(d3)), 7);
dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
dd23 = load_u8(dst + 2 * dst_stride, dst_stride);
+
d01 = vrhadd_u8(d01, dd01);
d23 = vrhadd_u8(d23, dd23);
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d01), 1);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 0);
- dst += dst_stride;
- vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d23), 1);
- dst += dst_stride;
+ store_u8(dst + 0 * dst_stride, dst_stride, d01);
+ store_u8(dst + 2 * dst_stride, dst_stride, d23);
/* Prepare block for next iteration - re-using as much as possible. */
/* Shuffle everything up four rows. */
@@ -637,6 +1142,7 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s3456 = s78910;
src += 4 * src_stride;
+ dst += 4 * dst_stride;
h -= 4;
} while (h > 0);
} else {
@@ -654,14 +1160,8 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s = src;
d = dst;
- load_u8_8x4(s, src_stride, &t0, &t1, &t2, &t3);
- s += 4 * src_stride;
- t4 = vld1_u8(s);
- s += src_stride;
- t5 = vld1_u8(s);
- s += src_stride;
- t6 = vld1_u8(s);
- s += src_stride;
+ load_u8_8x7(s, src_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6);
+ s += 7 * src_stride;
/* Clamp sample range to [-128, 127] for 8-bit signed dot product. */
s0 = vreinterpret_s8_u8(vsub_u8(t0, range_limit));
@@ -678,19 +1178,19 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
/* This operation combines a conventional transpose and the sample permute
* (see horizontal case) required before computing the dot product.
*/
- transpose_concat_8x4(&s0, &s1, &s2, &s3, &s0123_lo, &s0123_hi,
+ transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s1, &s2, &s3, &s4, &s1234_lo, &s1234_hi,
+ transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s2, &s3, &s4, &s5, &s2345_lo, &s2345_hi,
+ transpose_concat_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s3, &s4, &s5, &s6, &s3456_lo, &s3456_hi,
+ transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s4, &s5, &s6, &s7, &s4567_lo, &s4567_hi,
+ transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s5, &s6, &s7, &s8, &s5678_lo, &s5678_hi,
+ transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
tran_concat_tbl);
- transpose_concat_8x4(&s6, &s7, &s8, &s9, &s6789_lo, &s6789_hi,
+ transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
tran_concat_tbl);
do {
@@ -703,7 +1203,7 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s9 = vreinterpret_s8_u8(vsub_u8(t9, range_limit));
s10 = vreinterpret_s8_u8(vsub_u8(t10, range_limit));
- transpose_concat_8x4(&s7, &s8, &s9, &s10, &s78910_lo, &s78910_hi,
+ transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
tran_concat_tbl);
/* Merge new data into block from previous iteration. */
@@ -719,28 +1219,23 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
s5678_hi = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[1]);
s6789_hi = vqtbl2q_s8(samples_LUT, merge_block_tbl.val[2]);
- d0 = convolve8_8_dot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
- correction, filters);
- d1 = convolve8_8_dot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
- correction, filters);
- d2 = convolve8_8_dot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
- correction, filters);
- d3 = convolve8_8_dot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
- correction, filters);
-
- dd0 = vld1_u8(d + 0 * dst_stride);
- dd1 = vld1_u8(d + 1 * dst_stride);
- dd2 = vld1_u8(d + 2 * dst_stride);
- dd3 = vld1_u8(d + 3 * dst_stride);
+ d0 = convolve8_8_sdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
+ correction, filters);
+ d1 = convolve8_8_sdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
+ correction, filters);
+ d2 = convolve8_8_sdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
+ correction, filters);
+ d3 = convolve8_8_sdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
+ correction, filters);
+
+ load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);
+
d0 = vrhadd_u8(d0, dd0);
d1 = vrhadd_u8(d1, dd1);
d2 = vrhadd_u8(d2, dd2);
d3 = vrhadd_u8(d3, dd3);
- vst1_u8(d + 0 * dst_stride, d0);
- vst1_u8(d + 1 * dst_stride, d1);
- vst1_u8(d + 2 * dst_stride, d2);
- vst1_u8(d + 3 * dst_stride, d3);
+ store_u8_8x4(d, dst_stride, d0, d1, d2, d3);
/* Prepare block for next iteration - re-using as much as possible. */
/* Shuffle everything up four rows. */
@@ -764,29 +1259,11 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
}
}
-#else
-
-static INLINE void store_u8_8x8(uint8_t *s, const ptrdiff_t p,
- const uint8x8_t s0, const uint8x8_t s1,
- const uint8x8_t s2, const uint8x8_t s3,
- const uint8x8_t s4, const uint8x8_t s5,
- const uint8x8_t s6, const uint8x8_t s7) {
- vst1_u8(s, s0);
- s += p;
- vst1_u8(s, s1);
- s += p;
- vst1_u8(s, s2);
- s += p;
- vst1_u8(s, s3);
- s += p;
- vst1_u8(s, s4);
- s += p;
- vst1_u8(s, s5);
- s += p;
- vst1_u8(s, s6);
- s += p;
- vst1_u8(s, s7);
-}
+#endif // defined(__ARM_FEATURE_MATMUL_INT8)
+
+#else // !(defined(__aarch64__) &&
+ // (defined(__ARM_FEATURE_DOTPROD) ||
+ // defined(__ARM_FEATURE_MATMUL_INT8)))
void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
@@ -808,16 +1285,13 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
if (h == 4) {
uint8x8_t d01, d23;
- int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0,
- d1, d2, d3;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
int16x8_t tt0, tt1, tt2, tt3;
__builtin_prefetch(src + 0 * src_stride);
__builtin_prefetch(src + 1 * src_stride);
__builtin_prefetch(src + 2 * src_stride);
__builtin_prefetch(src + 3 * src_stride);
- filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
- filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
transpose_u8_8x4(&t0, &t1, &t2, &t3);
tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
@@ -849,14 +1323,10 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
s9 = vget_low_s16(tt2);
s10 = vget_low_s16(tt3);
- d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
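
The filter3/filter4 arguments disappear from these call sites because the refactored convolve8_4 helper (shared in vpx_dsp/arm/vpx_convolve8_neon.h, outside this diff) is assumed to extract the taps itself via lane-indexed multiply-accumulates, roughly:

  /* Assumed shape of the refactored helper: six taps via vmla_lane_s16, with
   * the two large middle taps (3 and 4) added last using saturating adds. */
  static INLINE int16x4_t convolve8_4_sketch(
      int16x4_t s0, int16x4_t s1, int16x4_t s2, int16x4_t s3, int16x4_t s4,
      int16x4_t s5, int16x4_t s6, int16x4_t s7, int16x8_t filters) {
    const int16x4_t lo = vget_low_s16(filters);
    const int16x4_t hi = vget_high_s16(filters);
    int16x4_t sum = vmul_lane_s16(s0, lo, 0);
    sum = vmla_lane_s16(sum, s1, lo, 1);
    sum = vmla_lane_s16(sum, s2, lo, 2);
    sum = vmla_lane_s16(sum, s5, hi, 1);
    sum = vmla_lane_s16(sum, s6, hi, 2);
    sum = vmla_lane_s16(sum, s7, hi, 3);
    sum = vqadd_s16(sum, vmul_lane_s16(s3, lo, 3));
    sum = vqadd_s16(sum, vmul_lane_s16(s4, hi, 0));
    return sum;
  }
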
@@ -883,8 +1353,6 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
w -= 4;
} while (w != 0);
} else {
- const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
- const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
int width;
const uint8_t *s;
uint8x8_t t4, t5, t6, t7;
@@ -927,14 +1395,10 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
__builtin_prefetch(src + 5 * src_stride);
__builtin_prefetch(src + 6 * src_stride);
__builtin_prefetch(src + 7 * src_stride);
- t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
transpose_u8_8x4(&t0, &t1, &t2, &t3);
vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(t0), 0);
@@ -1002,22 +1466,14 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
- t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
- t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters, filter3,
- filter4);
- t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters, filter3,
- filter4);
- t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters, filter3,
- filter4);
- t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters,
- filter3, filter4);
+ t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
+ t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters);
+ t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters);
+ t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters);
+ t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters);
transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
store_u8_8x8(d, dst_stride, t0, t1, t2, t3, t4, t5, t6, t7);
@@ -1061,8 +1517,7 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
if (h == 4) {
uint8x8_t d01, d23;
- int16x4_t filter3, filter4, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0,
- d1, d2, d3;
+ int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
int16x8_t tt0, tt1, tt2, tt3;
uint32x4_t d0123 = vdupq_n_u32(0);
@@ -1070,8 +1525,6 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
__builtin_prefetch(src + 1 * src_stride);
__builtin_prefetch(src + 2 * src_stride);
__builtin_prefetch(src + 3 * src_stride);
- filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
- filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
load_u8_8x4(src, src_stride, &t0, &t1, &t2, &t3);
transpose_u8_8x4(&t0, &t1, &t2, &t3);
tt0 = vreinterpretq_s16_u16(vmovl_u8(t0));
@@ -1103,14 +1556,10 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
s9 = vget_low_s16(tt2);
s10 = vget_low_s16(tt3);
- d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
@@ -1140,8 +1589,6 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
w -= 4;
} while (w != 0);
} else {
- const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
- const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
int width;
const uint8_t *s;
uint8x8_t t4, t5, t6, t7;
@@ -1186,14 +1633,10 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
__builtin_prefetch(src + 5 * src_stride);
__builtin_prefetch(src + 6 * src_stride);
__builtin_prefetch(src + 7 * src_stride);
- t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
transpose_u8_8x4(&t0, &t1, &t2, &t3);
@@ -1276,22 +1719,14 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride,
s13 = vreinterpretq_s16_u16(vmovl_u8(t6));
s14 = vreinterpretq_s16_u16(vmovl_u8(t7));
- t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
- t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters, filter3,
- filter4);
- t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters, filter3,
- filter4);
- t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters, filter3,
- filter4);
- t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters,
- filter3, filter4);
+ t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
+ t4 = convolve8_8(s4, s5, s6, s7, s8, s9, s10, s11, filters);
+ t5 = convolve8_8(s5, s6, s7, s8, s9, s10, s11, s12, filters);
+ t6 = convolve8_8(s6, s7, s8, s9, s10, s11, s12, s13, filters);
+ t7 = convolve8_8(s7, s8, s9, s10, s11, s12, s13, s14, filters);
transpose_u8_8x8(&t0, &t1, &t2, &t3, &t4, &t5, &t6, &t7);
@@ -1349,8 +1784,6 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
src -= 3 * src_stride;
if (w == 4) {
- const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
- const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
uint8x8_t d01, d23;
int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
@@ -1387,14 +1820,10 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
__builtin_prefetch(src + 1 * src_stride);
__builtin_prefetch(src + 2 * src_stride);
__builtin_prefetch(src + 3 * src_stride);
- d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
@@ -1417,8 +1846,6 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
h -= 4;
} while (h != 0);
} else {
- const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
- const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
int height;
const uint8_t *s;
uint8_t *d;
@@ -1469,14 +1896,10 @@ void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
__builtin_prefetch(s + 1 * src_stride);
__builtin_prefetch(s + 2 * src_stride);
__builtin_prefetch(s + 3 * src_stride);
- t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
vst1_u8(d, t0);
d += dst_stride;
@@ -1521,8 +1944,6 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
src -= 3 * src_stride;
if (w == 4) {
- const int16x4_t filter3 = vdup_lane_s16(vget_low_s16(filters), 3);
- const int16x4_t filter4 = vdup_lane_s16(vget_high_s16(filters), 0);
uint8x8_t d01, d23;
int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, d0, d1, d2, d3;
uint32x4_t d0123 = vdupq_n_u32(0);
@@ -1560,14 +1981,10 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
__builtin_prefetch(src + 1 * src_stride);
__builtin_prefetch(src + 2 * src_stride);
__builtin_prefetch(src + 3 * src_stride);
- d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ d0 = convolve8_4(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ d1 = convolve8_4(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ d2 = convolve8_4(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ d3 = convolve8_4(s3, s4, s5, s6, s7, s8, s9, s10, filters);
d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), 7);
d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), 7);
@@ -1598,8 +2015,6 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
h -= 4;
} while (h != 0);
} else {
- const int16x8_t filter3 = vdupq_lane_s16(vget_low_s16(filters), 3);
- const int16x8_t filter4 = vdupq_lane_s16(vget_high_s16(filters), 0);
int height;
const uint8_t *s;
uint8_t *d;
@@ -1651,14 +2066,10 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
__builtin_prefetch(s + 1 * src_stride);
__builtin_prefetch(s + 2 * src_stride);
__builtin_prefetch(s + 3 * src_stride);
- t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters, filter3,
- filter4);
- t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters, filter3,
- filter4);
- t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters, filter3,
- filter4);
- t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters, filter3,
- filter4);
+ t0 = convolve8_8(s0, s1, s2, s3, s4, s5, s6, s7, filters);
+ t1 = convolve8_8(s1, s2, s3, s4, s5, s6, s7, s8, filters);
+ t2 = convolve8_8(s2, s3, s4, s5, s6, s7, s8, s9, filters);
+ t3 = convolve8_8(s3, s4, s5, s6, s7, s8, s9, s10, filters);
d01 = vcombine_u8(t0, t1);
d23 = vcombine_u8(t2, t3);
@@ -1694,4 +2105,6 @@ void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride,
}
}
-#endif
+#endif // #if defined(__aarch64__) &&
+ // (defined(__ARM_FEATURE_DOTPROD) ||
+ // defined(__ARM_FEATURE_MATMUL_INT8))