Diffstat (limited to 'vp9/encoder/x86')
-rw-r--r--  vp9/encoder/x86/highbd_temporal_filter_sse4.c  |   7
-rw-r--r--  vp9/encoder/x86/temporal_filter_constants.h    | 410
-rw-r--r--  vp9/encoder/x86/temporal_filter_sse4.c         |   2
-rw-r--r--  vp9/encoder/x86/vp9_diamond_search_sad_avx.c   | 317
-rw-r--r--  vp9/encoder/x86/vp9_frame_scale_ssse3.c        |  24
-rw-r--r--  vp9/encoder/x86/vp9_quantize_avx2.c            |  65
-rw-r--r--  vp9/encoder/x86/vp9_quantize_sse2.c            |  11
-rw-r--r--  vp9/encoder/x86/vp9_quantize_ssse3.c           |  21
8 files changed, 65 insertions, 792 deletions
diff --git a/vp9/encoder/x86/highbd_temporal_filter_sse4.c b/vp9/encoder/x86/highbd_temporal_filter_sse4.c
index a7f5117cf..97f182c66 100644
--- a/vp9/encoder/x86/highbd_temporal_filter_sse4.c
+++ b/vp9/encoder/x86/highbd_temporal_filter_sse4.c
@@ -16,7 +16,7 @@
#include "vpx/vpx_integer.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_temporal_filter.h"
-#include "vp9/encoder/x86/temporal_filter_constants.h"
+#include "vp9/encoder/vp9_temporal_filter_constants.h"
// Compute (a-b)**2 for 8 pixels with size 16-bit
static INLINE void highbd_store_dist_8(const uint16_t *a, const uint16_t *b,
@@ -141,11 +141,12 @@ static INLINE void highbd_accumulate_and_store_8(const __m128i sum_first_u32,
count_u16 = _mm_adds_epu16(count_u16, sum_u16);
_mm_storeu_si128((__m128i *)count, count_u16);
- pred_u16 = _mm_mullo_epi16(sum_u16, pred_u16);
-
pred_0_u32 = _mm_cvtepu16_epi32(pred_u16);
pred_1_u32 = _mm_unpackhi_epi16(pred_u16, zero);
+ pred_0_u32 = _mm_mullo_epi32(sum_first_u32, pred_0_u32);
+ pred_1_u32 = _mm_mullo_epi32(sum_second_u32, pred_1_u32);
+
accum_0_u32 = _mm_loadu_si128((const __m128i *)accumulator);
accum_1_u32 = _mm_loadu_si128((const __m128i *)(accumulator + 4));
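The hunk above replaces the 16-bit multiply of the weight sums and predictor values with 32-bit multiplies on operands that are widened first, so the high bits of the product are no longer discarded. A scalar sketch of the same accumulation, using a hypothetical helper name, illustrates the difference (this is not code from the patch):

#include <stdint.h>

/* Scalar sketch of the accumulation fixed above (hypothetical helper, not
 * part of libvpx). */
static void accumulate_scalar(const uint16_t *pred, const uint16_t *sum,
                              uint32_t *accumulator, int n) {
  int i;
  for (i = 0; i < n; ++i) {
    /* The old form, (uint16_t)(sum[i] * pred[i]), kept only the low 16 bits
     * of the product. Widening both operands first matches what the patch
     * does with _mm_cvtepu16_epi32()/_mm_unpackhi_epi16() followed by
     * _mm_mullo_epi32(). */
    accumulator[i] += (uint32_t)sum[i] * (uint32_t)pred[i];
  }
}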
diff --git a/vp9/encoder/x86/temporal_filter_constants.h b/vp9/encoder/x86/temporal_filter_constants.h
deleted file mode 100644
index 7dcedda19..000000000
--- a/vp9/encoder/x86/temporal_filter_constants.h
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright (c) 2019 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VPX_VP9_ENCODER_X86_TEMPORAL_FILTER_CONSTANTS_H_
-#define VPX_VP9_ENCODER_X86_TEMPORAL_FILTER_CONSTANTS_H_
-#include "./vpx_config.h"
-
-// Division using multiplication and shifting. The C implementation does:
-// modifier *= 3;
-// modifier /= index;
-// where 'modifier' is a set of summed values and 'index' is the number of
-// summed values.
-//
-// This equation works out to (m * 3) / i which reduces to:
-// m * 3/4
-// m * 1/2
-// m * 1/3
-//
-// By pairing the multiply with a down shift by 16 (_mm_mulhi_epu16):
-// m * C / 65536
-// we can create a constant C to replicate the division.
-//
-// m * 49152 / 65536 = m * 3/4
-// m * 32768 / 65536 = m * 1/2
-// m * 21846 / 65536 = m * 0.3333
-//
-// These are loaded using an instruction expecting int16_t values but are used
-// with _mm_mulhi_epu16(), which treats them as unsigned.
-#define NEIGHBOR_CONSTANT_4 (int16_t)49152
-#define NEIGHBOR_CONSTANT_5 (int16_t)39322
-#define NEIGHBOR_CONSTANT_6 (int16_t)32768
-#define NEIGHBOR_CONSTANT_7 (int16_t)28087
-#define NEIGHBOR_CONSTANT_8 (int16_t)24576
-#define NEIGHBOR_CONSTANT_9 (int16_t)21846
-#define NEIGHBOR_CONSTANT_10 (int16_t)19661
-#define NEIGHBOR_CONSTANT_11 (int16_t)17874
-#define NEIGHBOR_CONSTANT_13 (int16_t)15124
-
-DECLARE_ALIGNED(16, static const int16_t, LEFT_CORNER_NEIGHBORS_PLUS_1[8]) = {
- NEIGHBOR_CONSTANT_5, NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7,
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7,
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7
-};
-
-DECLARE_ALIGNED(16, static const int16_t, RIGHT_CORNER_NEIGHBORS_PLUS_1[8]) = {
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7,
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7,
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_5
-};
-
-DECLARE_ALIGNED(16, static const int16_t, LEFT_EDGE_NEIGHBORS_PLUS_1[8]) = {
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const int16_t, RIGHT_EDGE_NEIGHBORS_PLUS_1[8]) = {
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_7
-};
-
-DECLARE_ALIGNED(16, static const int16_t, MIDDLE_EDGE_NEIGHBORS_PLUS_1[8]) = {
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7,
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7,
- NEIGHBOR_CONSTANT_7, NEIGHBOR_CONSTANT_7
-};
-
-DECLARE_ALIGNED(16, static const int16_t, MIDDLE_CENTER_NEIGHBORS_PLUS_1[8]) = {
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const int16_t, LEFT_CORNER_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_6, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const int16_t, RIGHT_CORNER_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_6
-};
-
-DECLARE_ALIGNED(16, static const int16_t, LEFT_EDGE_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11
-};
-
-DECLARE_ALIGNED(16, static const int16_t, RIGHT_EDGE_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const int16_t, MIDDLE_EDGE_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const int16_t, MIDDLE_CENTER_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11
-};
-
-DECLARE_ALIGNED(16, static const int16_t, TWO_CORNER_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_6, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_8,
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_6
-};
-
-DECLARE_ALIGNED(16, static const int16_t, TWO_EDGE_NEIGHBORS_PLUS_2[8]) = {
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_11,
- NEIGHBOR_CONSTANT_11, NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const int16_t, LEFT_CORNER_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const int16_t, RIGHT_CORNER_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const int16_t, LEFT_EDGE_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13
-};
-
-DECLARE_ALIGNED(16, static const int16_t, RIGHT_EDGE_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const int16_t, MIDDLE_EDGE_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const int16_t, MIDDLE_CENTER_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13
-};
-
-DECLARE_ALIGNED(16, static const int16_t, TWO_CORNER_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_8, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_10,
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const int16_t, TWO_EDGE_NEIGHBORS_PLUS_4[8]) = {
- NEIGHBOR_CONSTANT_10, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_13,
- NEIGHBOR_CONSTANT_13, NEIGHBOR_CONSTANT_10
-};
-
-static const int16_t *const LUMA_LEFT_COLUMN_NEIGHBORS[2] = {
- LEFT_CORNER_NEIGHBORS_PLUS_2, LEFT_EDGE_NEIGHBORS_PLUS_2
-};
-
-static const int16_t *const LUMA_MIDDLE_COLUMN_NEIGHBORS[2] = {
- MIDDLE_EDGE_NEIGHBORS_PLUS_2, MIDDLE_CENTER_NEIGHBORS_PLUS_2
-};
-
-static const int16_t *const LUMA_RIGHT_COLUMN_NEIGHBORS[2] = {
- RIGHT_CORNER_NEIGHBORS_PLUS_2, RIGHT_EDGE_NEIGHBORS_PLUS_2
-};
-
-static const int16_t *const CHROMA_NO_SS_LEFT_COLUMN_NEIGHBORS[2] = {
- LEFT_CORNER_NEIGHBORS_PLUS_1, LEFT_EDGE_NEIGHBORS_PLUS_1
-};
-
-static const int16_t *const CHROMA_NO_SS_MIDDLE_COLUMN_NEIGHBORS[2] = {
- MIDDLE_EDGE_NEIGHBORS_PLUS_1, MIDDLE_CENTER_NEIGHBORS_PLUS_1
-};
-
-static const int16_t *const CHROMA_NO_SS_RIGHT_COLUMN_NEIGHBORS[2] = {
- RIGHT_CORNER_NEIGHBORS_PLUS_1, RIGHT_EDGE_NEIGHBORS_PLUS_1
-};
-
-static const int16_t *const CHROMA_SINGLE_SS_LEFT_COLUMN_NEIGHBORS[2] = {
- LEFT_CORNER_NEIGHBORS_PLUS_2, LEFT_EDGE_NEIGHBORS_PLUS_2
-};
-
-static const int16_t *const CHROMA_SINGLE_SS_MIDDLE_COLUMN_NEIGHBORS[2] = {
- MIDDLE_EDGE_NEIGHBORS_PLUS_2, MIDDLE_CENTER_NEIGHBORS_PLUS_2
-};
-
-static const int16_t *const CHROMA_SINGLE_SS_RIGHT_COLUMN_NEIGHBORS[2] = {
- RIGHT_CORNER_NEIGHBORS_PLUS_2, RIGHT_EDGE_NEIGHBORS_PLUS_2
-};
-
-static const int16_t *const CHROMA_SINGLE_SS_SINGLE_COLUMN_NEIGHBORS[2] = {
- TWO_CORNER_NEIGHBORS_PLUS_2, TWO_EDGE_NEIGHBORS_PLUS_2
-};
-
-static const int16_t *const CHROMA_DOUBLE_SS_LEFT_COLUMN_NEIGHBORS[2] = {
- LEFT_CORNER_NEIGHBORS_PLUS_4, LEFT_EDGE_NEIGHBORS_PLUS_4
-};
-
-static const int16_t *const CHROMA_DOUBLE_SS_MIDDLE_COLUMN_NEIGHBORS[2] = {
- MIDDLE_EDGE_NEIGHBORS_PLUS_4, MIDDLE_CENTER_NEIGHBORS_PLUS_4
-};
-
-static const int16_t *const CHROMA_DOUBLE_SS_RIGHT_COLUMN_NEIGHBORS[2] = {
- RIGHT_CORNER_NEIGHBORS_PLUS_4, RIGHT_EDGE_NEIGHBORS_PLUS_4
-};
-
-static const int16_t *const CHROMA_DOUBLE_SS_SINGLE_COLUMN_NEIGHBORS[2] = {
- TWO_CORNER_NEIGHBORS_PLUS_4, TWO_EDGE_NEIGHBORS_PLUS_4
-};
-
-#if CONFIG_VP9_HIGHBITDEPTH
-#define HIGHBD_NEIGHBOR_CONSTANT_4 (uint32_t)3221225472U
-#define HIGHBD_NEIGHBOR_CONSTANT_5 (uint32_t)2576980378U
-#define HIGHBD_NEIGHBOR_CONSTANT_6 (uint32_t)2147483648U
-#define HIGHBD_NEIGHBOR_CONSTANT_7 (uint32_t)1840700270U
-#define HIGHBD_NEIGHBOR_CONSTANT_8 (uint32_t)1610612736U
-#define HIGHBD_NEIGHBOR_CONSTANT_9 (uint32_t)1431655766U
-#define HIGHBD_NEIGHBOR_CONSTANT_10 (uint32_t)1288490189U
-#define HIGHBD_NEIGHBOR_CONSTANT_11 (uint32_t)1171354718U
-#define HIGHBD_NEIGHBOR_CONSTANT_13 (uint32_t)991146300U
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_LEFT_CORNER_NEIGHBORS_PLUS_1[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_5, HIGHBD_NEIGHBOR_CONSTANT_7,
- HIGHBD_NEIGHBOR_CONSTANT_7, HIGHBD_NEIGHBOR_CONSTANT_7
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_RIGHT_CORNER_NEIGHBORS_PLUS_1[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_7, HIGHBD_NEIGHBOR_CONSTANT_7,
- HIGHBD_NEIGHBOR_CONSTANT_7, HIGHBD_NEIGHBOR_CONSTANT_5
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_LEFT_EDGE_NEIGHBORS_PLUS_1[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_7, HIGHBD_NEIGHBOR_CONSTANT_10,
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_RIGHT_EDGE_NEIGHBORS_PLUS_1[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10,
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_7
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_MIDDLE_EDGE_NEIGHBORS_PLUS_1[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_7, HIGHBD_NEIGHBOR_CONSTANT_7,
- HIGHBD_NEIGHBOR_CONSTANT_7, HIGHBD_NEIGHBOR_CONSTANT_7
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_MIDDLE_CENTER_NEIGHBORS_PLUS_1[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10,
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_LEFT_CORNER_NEIGHBORS_PLUS_2[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_6, HIGHBD_NEIGHBOR_CONSTANT_8,
- HIGHBD_NEIGHBOR_CONSTANT_8, HIGHBD_NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_RIGHT_CORNER_NEIGHBORS_PLUS_2[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_8, HIGHBD_NEIGHBOR_CONSTANT_8,
- HIGHBD_NEIGHBOR_CONSTANT_8, HIGHBD_NEIGHBOR_CONSTANT_6
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_LEFT_EDGE_NEIGHBORS_PLUS_2[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_8, HIGHBD_NEIGHBOR_CONSTANT_11,
- HIGHBD_NEIGHBOR_CONSTANT_11, HIGHBD_NEIGHBOR_CONSTANT_11
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_RIGHT_EDGE_NEIGHBORS_PLUS_2[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_11, HIGHBD_NEIGHBOR_CONSTANT_11,
- HIGHBD_NEIGHBOR_CONSTANT_11, HIGHBD_NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_MIDDLE_EDGE_NEIGHBORS_PLUS_2[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_8, HIGHBD_NEIGHBOR_CONSTANT_8,
- HIGHBD_NEIGHBOR_CONSTANT_8, HIGHBD_NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_MIDDLE_CENTER_NEIGHBORS_PLUS_2[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_11, HIGHBD_NEIGHBOR_CONSTANT_11,
- HIGHBD_NEIGHBOR_CONSTANT_11, HIGHBD_NEIGHBOR_CONSTANT_11
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_LEFT_CORNER_NEIGHBORS_PLUS_4[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_8, HIGHBD_NEIGHBOR_CONSTANT_10,
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_RIGHT_CORNER_NEIGHBORS_PLUS_4[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10,
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_8
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_LEFT_EDGE_NEIGHBORS_PLUS_4[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_13,
- HIGHBD_NEIGHBOR_CONSTANT_13, HIGHBD_NEIGHBOR_CONSTANT_13
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_RIGHT_EDGE_NEIGHBORS_PLUS_4[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_13, HIGHBD_NEIGHBOR_CONSTANT_13,
- HIGHBD_NEIGHBOR_CONSTANT_13, HIGHBD_NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_MIDDLE_EDGE_NEIGHBORS_PLUS_4[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10,
- HIGHBD_NEIGHBOR_CONSTANT_10, HIGHBD_NEIGHBOR_CONSTANT_10
-};
-
-DECLARE_ALIGNED(16, static const uint32_t,
- HIGHBD_MIDDLE_CENTER_NEIGHBORS_PLUS_4[4]) = {
- HIGHBD_NEIGHBOR_CONSTANT_13, HIGHBD_NEIGHBOR_CONSTANT_13,
- HIGHBD_NEIGHBOR_CONSTANT_13, HIGHBD_NEIGHBOR_CONSTANT_13
-};
-
-static const uint32_t *const HIGHBD_LUMA_LEFT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_LEFT_CORNER_NEIGHBORS_PLUS_2, HIGHBD_LEFT_EDGE_NEIGHBORS_PLUS_2
-};
-
-static const uint32_t *const HIGHBD_LUMA_MIDDLE_COLUMN_NEIGHBORS[2] = {
- HIGHBD_MIDDLE_EDGE_NEIGHBORS_PLUS_2, HIGHBD_MIDDLE_CENTER_NEIGHBORS_PLUS_2
-};
-
-static const uint32_t *const HIGHBD_LUMA_RIGHT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_RIGHT_CORNER_NEIGHBORS_PLUS_2, HIGHBD_RIGHT_EDGE_NEIGHBORS_PLUS_2
-};
-
-static const uint32_t *const HIGHBD_CHROMA_NO_SS_LEFT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_LEFT_CORNER_NEIGHBORS_PLUS_1, HIGHBD_LEFT_EDGE_NEIGHBORS_PLUS_1
-};
-
-static const uint32_t *const HIGHBD_CHROMA_NO_SS_MIDDLE_COLUMN_NEIGHBORS[2] = {
- HIGHBD_MIDDLE_EDGE_NEIGHBORS_PLUS_1, HIGHBD_MIDDLE_CENTER_NEIGHBORS_PLUS_1
-};
-
-static const uint32_t *const HIGHBD_CHROMA_NO_SS_RIGHT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_RIGHT_CORNER_NEIGHBORS_PLUS_1, HIGHBD_RIGHT_EDGE_NEIGHBORS_PLUS_1
-};
-
-static const uint32_t
- *const HIGHBD_CHROMA_SINGLE_SS_LEFT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_LEFT_CORNER_NEIGHBORS_PLUS_2, HIGHBD_LEFT_EDGE_NEIGHBORS_PLUS_2
- };
-
-static const uint32_t
- *const HIGHBD_CHROMA_SINGLE_SS_MIDDLE_COLUMN_NEIGHBORS[2] = {
- HIGHBD_MIDDLE_EDGE_NEIGHBORS_PLUS_2, HIGHBD_MIDDLE_CENTER_NEIGHBORS_PLUS_2
- };
-
-static const uint32_t
- *const HIGHBD_CHROMA_SINGLE_SS_RIGHT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_RIGHT_CORNER_NEIGHBORS_PLUS_2, HIGHBD_RIGHT_EDGE_NEIGHBORS_PLUS_2
- };
-
-static const uint32_t
- *const HIGHBD_CHROMA_DOUBLE_SS_LEFT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_LEFT_CORNER_NEIGHBORS_PLUS_4, HIGHBD_LEFT_EDGE_NEIGHBORS_PLUS_4
- };
-
-static const uint32_t
- *const HIGHBD_CHROMA_DOUBLE_SS_MIDDLE_COLUMN_NEIGHBORS[2] = {
- HIGHBD_MIDDLE_EDGE_NEIGHBORS_PLUS_4, HIGHBD_MIDDLE_CENTER_NEIGHBORS_PLUS_4
- };
-
-static const uint32_t
- *const HIGHBD_CHROMA_DOUBLE_SS_RIGHT_COLUMN_NEIGHBORS[2] = {
- HIGHBD_RIGHT_CORNER_NEIGHBORS_PLUS_4, HIGHBD_RIGHT_EDGE_NEIGHBORS_PLUS_4
- };
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-#define DIST_STRIDE ((BW) + 2)
-
-#endif // VPX_VP9_ENCODER_X86_TEMPORAL_FILTER_CONSTANTS_H_
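The comment block at the top of the deleted header describes a standard fixed-point trick: (m * 3) / index is computed as (m * C) >> 16 with C = 3 * 65536 / index rounded up, which is exactly what _mm_mulhi_epu16() produces per unsigned 16-bit lane. A small stand-alone program (an illustrative check, not part of the library) confirms the approximation never differs from the exact quotient by more than one for the constants quoted above:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* index / constant pairs taken from the deleted header. */
  const struct { uint32_t index; uint16_t c; } k[] = {
    { 4, 49152 }, { 6, 32768 }, { 9, 21846 }
  };
  size_t i;
  for (i = 0; i < sizeof(k) / sizeof(k[0]); ++i) {
    uint32_t m;
    for (m = 0; m < 65536; ++m) {
      const uint32_t exact = (m * 3) / k[i].index;
      /* Same value _mm_mulhi_epu16() would leave in this 16-bit lane. */
      const uint32_t approx = (m * k[i].c) >> 16;
      /* The constants are rounded up, so the approximation may exceed the
       * exact quotient, but only by one. */
      if (approx != exact && approx != exact + 1) {
        printf("mismatch: index=%u m=%u exact=%u approx=%u\n",
               k[i].index, m, exact, approx);
        return 1;
      }
    }
  }
  printf("all checked constants are within one of the exact quotient\n");
  return 0;
}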
diff --git a/vp9/encoder/x86/temporal_filter_sse4.c b/vp9/encoder/x86/temporal_filter_sse4.c
index 87e68fb43..7571bfcca 100644
--- a/vp9/encoder/x86/temporal_filter_sse4.c
+++ b/vp9/encoder/x86/temporal_filter_sse4.c
@@ -16,7 +16,7 @@
#include "vpx/vpx_integer.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_temporal_filter.h"
-#include "vp9/encoder/x86/temporal_filter_constants.h"
+#include "vp9/encoder/vp9_temporal_filter_constants.h"
// Read in 8 pixels from a and b as 8-bit unsigned integers, compute the
// difference squared, and store as unsigned 16-bit integer to dst.
diff --git a/vp9/encoder/x86/vp9_diamond_search_sad_avx.c b/vp9/encoder/x86/vp9_diamond_search_sad_avx.c
deleted file mode 100644
index 0e04a2f41..000000000
--- a/vp9/encoder/x86/vp9_diamond_search_sad_avx.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#endif
-#include <emmintrin.h>
-#include <smmintrin.h>
-
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vp9/encoder/vp9_encoder.h"
-#include "vpx_ports/mem.h"
-
-#ifdef __GNUC__
-#define LIKELY(v) __builtin_expect(v, 1)
-#define UNLIKELY(v) __builtin_expect(v, 0)
-#else
-#define LIKELY(v) (v)
-#define UNLIKELY(v) (v)
-#endif
-
-static INLINE int_mv pack_int_mv(int16_t row, int16_t col) {
- int_mv result;
- result.as_mv.row = row;
- result.as_mv.col = col;
- return result;
-}
-
-static INLINE MV_JOINT_TYPE get_mv_joint(const int_mv mv) {
- // This is simplified from the C implementation to utilise that
- // x->nmvjointsadcost[1] == x->nmvjointsadcost[2] and
- // x->nmvjointsadcost[1] == x->nmvjointsadcost[3]
- return mv.as_int == 0 ? 0 : 1;
-}
-
-static INLINE int mv_cost(const int_mv mv, const int *joint_cost,
- int *const comp_cost[2]) {
- return joint_cost[get_mv_joint(mv)] + comp_cost[0][mv.as_mv.row] +
- comp_cost[1][mv.as_mv.col];
-}
-
-static int mvsad_err_cost(const MACROBLOCK *x, const int_mv mv, const MV *ref,
- int sad_per_bit) {
- const int_mv diff =
- pack_int_mv(mv.as_mv.row - ref->row, mv.as_mv.col - ref->col);
- return ROUND_POWER_OF_TWO(
- (unsigned)mv_cost(diff, x->nmvjointsadcost, x->nmvsadcost) * sad_per_bit,
- VP9_PROB_COST_SHIFT);
-}
-
-/*****************************************************************************
- * This function utilizes 3 properties of the cost function lookup tables, *
- * constructed in using 'cal_nmvjointsadcost' and 'cal_nmvsadcosts' in *
- * vp9_encoder.c. *
- * For the joint cost: *
- * - mvjointsadcost[1] == mvjointsadcost[2] == mvjointsadcost[3] *
- * For the component costs: *
- * - For all i: mvsadcost[0][i] == mvsadcost[1][i] *
- * (Equal costs for both components) *
- * - For all i: mvsadcost[0][i] == mvsadcost[0][-i] *
- * (Cost function is even) *
- * If these do not hold, then this function cannot be used without *
- * modification, in which case you can revert to using the C implementation, *
- * which does not rely on these properties. *
- *****************************************************************************/
-int vp9_diamond_search_sad_avx(const MACROBLOCK *x,
- const search_site_config *cfg, MV *ref_mv,
- MV *best_mv, int search_param, int sad_per_bit,
- int *num00, const vp9_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv) {
- const int_mv maxmv = pack_int_mv(x->mv_limits.row_max, x->mv_limits.col_max);
- const __m128i v_max_mv_w = _mm_set1_epi32((int)maxmv.as_int);
- const int_mv minmv = pack_int_mv(x->mv_limits.row_min, x->mv_limits.col_min);
- const __m128i v_min_mv_w = _mm_set1_epi32((int)minmv.as_int);
-
- const __m128i v_spb_d = _mm_set1_epi32(sad_per_bit);
-
- const __m128i v_joint_cost_0_d = _mm_set1_epi32(x->nmvjointsadcost[0]);
- const __m128i v_joint_cost_1_d = _mm_set1_epi32(x->nmvjointsadcost[1]);
-
- // search_param determines the length of the initial step and hence the number
- // of iterations.
- // 0 = initial step (MAX_FIRST_STEP) pel
- // 1 = (MAX_FIRST_STEP/2) pel,
- // 2 = (MAX_FIRST_STEP/4) pel...
- const MV *ss_mv = &cfg->ss_mv[cfg->searches_per_step * search_param];
- const intptr_t *ss_os = &cfg->ss_os[cfg->searches_per_step * search_param];
- const int tot_steps = cfg->total_steps - search_param;
-
- const int_mv fcenter_mv =
- pack_int_mv(center_mv->row >> 3, center_mv->col >> 3);
- const __m128i vfcmv = _mm_set1_epi32((int)fcenter_mv.as_int);
-
- const int ref_row = clamp(ref_mv->row, minmv.as_mv.row, maxmv.as_mv.row);
- const int ref_col = clamp(ref_mv->col, minmv.as_mv.col, maxmv.as_mv.col);
-
- int_mv bmv = pack_int_mv(ref_row, ref_col);
- int_mv new_bmv = bmv;
- __m128i v_bmv_w = _mm_set1_epi32((int)bmv.as_int);
-
- const int what_stride = x->plane[0].src.stride;
- const int in_what_stride = x->e_mbd.plane[0].pre[0].stride;
- const uint8_t *const what = x->plane[0].src.buf;
- const uint8_t *const in_what =
- x->e_mbd.plane[0].pre[0].buf + ref_row * in_what_stride + ref_col;
-
- // Work out the start point for the search
- const uint8_t *best_address = in_what;
- const uint8_t *new_best_address = best_address;
-#if VPX_ARCH_X86_64
- __m128i v_ba_q = _mm_set1_epi64x((intptr_t)best_address);
-#else
- __m128i v_ba_d = _mm_set1_epi32((intptr_t)best_address);
-#endif
-
- unsigned int best_sad;
- int i, j, step;
-
- // Check the prerequisite cost function properties that are easy to check
- // in an assert. See the function-level documentation for details on all
- // prerequisites.
- assert(x->nmvjointsadcost[1] == x->nmvjointsadcost[2]);
- assert(x->nmvjointsadcost[1] == x->nmvjointsadcost[3]);
-
- // Check the starting position
- best_sad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride);
- best_sad += mvsad_err_cost(x, bmv, &fcenter_mv.as_mv, sad_per_bit);
-
- *num00 = 0;
-
- for (i = 0, step = 0; step < tot_steps; step++) {
- for (j = 0; j < cfg->searches_per_step; j += 4, i += 4) {
- __m128i v_sad_d, v_cost_d, v_outside_d, v_inside_d, v_diff_mv_w;
-#if VPX_ARCH_X86_64
- __m128i v_blocka[2];
-#else
- __m128i v_blocka[1];
-#endif
-
- // Compute the candidate motion vectors
- const __m128i v_ss_mv_w = _mm_loadu_si128((const __m128i *)&ss_mv[i]);
- const __m128i v_these_mv_w = _mm_add_epi16(v_bmv_w, v_ss_mv_w);
- // Clamp them to the search bounds
- __m128i v_these_mv_clamp_w = v_these_mv_w;
- v_these_mv_clamp_w = _mm_min_epi16(v_these_mv_clamp_w, v_max_mv_w);
- v_these_mv_clamp_w = _mm_max_epi16(v_these_mv_clamp_w, v_min_mv_w);
- // The ones that did not change are inside the search area
- v_inside_d = _mm_cmpeq_epi32(v_these_mv_clamp_w, v_these_mv_w);
-
- // If none of them are inside, then move on
- if (LIKELY(_mm_test_all_zeros(v_inside_d, v_inside_d))) {
- continue;
- }
-
- // The inverse mask indicates which of the MVs are outside
- v_outside_d = _mm_xor_si128(v_inside_d, _mm_set1_epi8((int8_t)0xff));
- // Shift right to keep the sign bit clear, we will use this later
- // to set the cost to the maximum value.
- v_outside_d = _mm_srli_epi32(v_outside_d, 1);
-
- // Compute the difference MV
- v_diff_mv_w = _mm_sub_epi16(v_these_mv_clamp_w, vfcmv);
- // We utilise the fact that the cost function is even, and use the
- // absolute difference. This allows us to use unsigned indexes later
- // and reduces cache pressure somewhat as only a half of the table
- // is ever referenced.
- v_diff_mv_w = _mm_abs_epi16(v_diff_mv_w);
-
- // Compute the SIMD pointer offsets.
- {
-#if VPX_ARCH_X86_64 // sizeof(intptr_t) == 8
- // Load the offsets
- __m128i v_bo10_q = _mm_loadu_si128((const __m128i *)&ss_os[i + 0]);
- __m128i v_bo32_q = _mm_loadu_si128((const __m128i *)&ss_os[i + 2]);
- // Set the ones falling outside to zero
- v_bo10_q = _mm_and_si128(v_bo10_q, _mm_cvtepi32_epi64(v_inside_d));
- v_bo32_q =
- _mm_and_si128(v_bo32_q, _mm_unpackhi_epi32(v_inside_d, v_inside_d));
- // Compute the candidate addresses
- v_blocka[0] = _mm_add_epi64(v_ba_q, v_bo10_q);
- v_blocka[1] = _mm_add_epi64(v_ba_q, v_bo32_q);
-#else // VPX_ARCH_X86 // sizeof(intptr_t) == 4
- __m128i v_bo_d = _mm_loadu_si128((const __m128i *)&ss_os[i]);
- v_bo_d = _mm_and_si128(v_bo_d, v_inside_d);
- v_blocka[0] = _mm_add_epi32(v_ba_d, v_bo_d);
-#endif
- }
-
- fn_ptr->sdx4df(what, what_stride, (const uint8_t **)&v_blocka[0],
- in_what_stride, (uint32_t *)&v_sad_d);
-
- // Look up the component cost of the residual motion vector
- {
- const int32_t row0 = _mm_extract_epi16(v_diff_mv_w, 0);
- const int32_t col0 = _mm_extract_epi16(v_diff_mv_w, 1);
- const int32_t row1 = _mm_extract_epi16(v_diff_mv_w, 2);
- const int32_t col1 = _mm_extract_epi16(v_diff_mv_w, 3);
- const int32_t row2 = _mm_extract_epi16(v_diff_mv_w, 4);
- const int32_t col2 = _mm_extract_epi16(v_diff_mv_w, 5);
- const int32_t row3 = _mm_extract_epi16(v_diff_mv_w, 6);
- const int32_t col3 = _mm_extract_epi16(v_diff_mv_w, 7);
-
- // Note: This is a use case for vpgather in AVX2
- const uint32_t cost0 = x->nmvsadcost[0][row0] + x->nmvsadcost[0][col0];
- const uint32_t cost1 = x->nmvsadcost[0][row1] + x->nmvsadcost[0][col1];
- const uint32_t cost2 = x->nmvsadcost[0][row2] + x->nmvsadcost[0][col2];
- const uint32_t cost3 = x->nmvsadcost[0][row3] + x->nmvsadcost[0][col3];
-
- __m128i v_cost_10_d, v_cost_32_d;
- v_cost_10_d = _mm_cvtsi32_si128(cost0);
- v_cost_10_d = _mm_insert_epi32(v_cost_10_d, cost1, 1);
- v_cost_32_d = _mm_cvtsi32_si128(cost2);
- v_cost_32_d = _mm_insert_epi32(v_cost_32_d, cost3, 1);
- v_cost_d = _mm_unpacklo_epi64(v_cost_10_d, v_cost_32_d);
- }
-
- // Now add in the joint cost
- {
- const __m128i v_sel_d =
- _mm_cmpeq_epi32(v_diff_mv_w, _mm_setzero_si128());
- const __m128i v_joint_cost_d =
- _mm_blendv_epi8(v_joint_cost_1_d, v_joint_cost_0_d, v_sel_d);
- v_cost_d = _mm_add_epi32(v_cost_d, v_joint_cost_d);
- }
-
- // Multiply by sad_per_bit
- v_cost_d = _mm_mullo_epi32(v_cost_d, v_spb_d);
- // ROUND_POWER_OF_TWO(v_cost_d, VP9_PROB_COST_SHIFT)
- v_cost_d = _mm_add_epi32(v_cost_d,
- _mm_set1_epi32(1 << (VP9_PROB_COST_SHIFT - 1)));
- v_cost_d = _mm_srai_epi32(v_cost_d, VP9_PROB_COST_SHIFT);
- // Add the cost to the sad
- v_sad_d = _mm_add_epi32(v_sad_d, v_cost_d);
-
- // Make the motion vectors outside the search area have max cost
- // by or'ing in the comparison mask, this way the minimum search won't
- // pick them.
- v_sad_d = _mm_or_si128(v_sad_d, v_outside_d);
-
- // Find the minimum value and index horizontally in v_sad_d
- {
- // Try speculatively on 16 bits, so we can use the minpos intrinsic
- const __m128i v_sad_w = _mm_packus_epi32(v_sad_d, v_sad_d);
- const __m128i v_minp_w = _mm_minpos_epu16(v_sad_w);
-
- uint32_t local_best_sad = _mm_extract_epi16(v_minp_w, 0);
- uint32_t local_best_idx = _mm_extract_epi16(v_minp_w, 1);
-
- // If the local best value is not saturated, just use it, otherwise
- // find the horizontal minimum again the hard way on 32 bits.
- // This is executed rarely.
- if (UNLIKELY(local_best_sad == 0xffff)) {
- __m128i v_loval_d, v_hival_d, v_loidx_d, v_hiidx_d, v_sel_d;
-
- v_loval_d = v_sad_d;
- v_loidx_d = _mm_set_epi32(3, 2, 1, 0);
- v_hival_d = _mm_srli_si128(v_loval_d, 8);
- v_hiidx_d = _mm_srli_si128(v_loidx_d, 8);
-
- v_sel_d = _mm_cmplt_epi32(v_hival_d, v_loval_d);
-
- v_loval_d = _mm_blendv_epi8(v_loval_d, v_hival_d, v_sel_d);
- v_loidx_d = _mm_blendv_epi8(v_loidx_d, v_hiidx_d, v_sel_d);
- v_hival_d = _mm_srli_si128(v_loval_d, 4);
- v_hiidx_d = _mm_srli_si128(v_loidx_d, 4);
-
- v_sel_d = _mm_cmplt_epi32(v_hival_d, v_loval_d);
-
- v_loval_d = _mm_blendv_epi8(v_loval_d, v_hival_d, v_sel_d);
- v_loidx_d = _mm_blendv_epi8(v_loidx_d, v_hiidx_d, v_sel_d);
-
- local_best_sad = _mm_extract_epi32(v_loval_d, 0);
- local_best_idx = _mm_extract_epi32(v_loidx_d, 0);
- }
-
- // Update the global minimum if the local minimum is smaller
- if (LIKELY(local_best_sad < best_sad)) {
-#if defined(__GNUC__) && __GNUC__ >= 4 && !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
-#endif
- new_bmv = ((const int_mv *)&v_these_mv_w)[local_best_idx];
-#if defined(__GNUC__) && __GNUC__ >= 4 && !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
- new_best_address = ((const uint8_t **)v_blocka)[local_best_idx];
-
- best_sad = local_best_sad;
- }
- }
- }
-
- bmv = new_bmv;
- best_address = new_best_address;
-
- v_bmv_w = _mm_set1_epi32((int)bmv.as_int);
-#if VPX_ARCH_X86_64
- v_ba_q = _mm_set1_epi64x((intptr_t)best_address);
-#else
- v_ba_d = _mm_set1_epi32((intptr_t)best_address);
-#endif
-
- if (UNLIKELY(best_address == in_what)) {
- (*num00)++;
- }
- }
-
- *best_mv = bmv.as_mv;
- return best_sad;
-}
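One idiom from the deleted search is worth a note: the horizontal minimum over the four 32-bit SADs is found by saturating-packing to 16 bits so that _mm_minpos_epu16() can return both the minimum and its lane index in one instruction, with a full-width fallback only when the packed value saturates to 0xffff. A stand-alone sketch of that reduction (a hypothetical helper, not libvpx code):

#include <smmintrin.h> /* SSE4.1: _mm_packus_epi32, _mm_minpos_epu16 */
#include <stdint.h>

/* Returns the smallest of the four unsigned 32-bit lanes of v and writes its
 * lane index to *idx (hypothetical helper, not libvpx code). */
static uint32_t hmin_epu32_with_index(__m128i v, uint32_t *idx) {
  /* Pack with unsigned saturation; v fills both halves of w, so the reported
   * index stays in 0..3. Lanes above 0xffff clamp and are handled by the
   * slow path below. */
  const __m128i w = _mm_packus_epi32(v, v);
  const __m128i minp = _mm_minpos_epu16(w);
  uint32_t best = (uint16_t)_mm_extract_epi16(minp, 0);
  *idx = (uint16_t)_mm_extract_epi16(minp, 1);
  if (best == 0xffff) { /* rare: redo the reduction at full 32-bit width */
    uint32_t lanes[4];
    uint32_t i;
    _mm_storeu_si128((__m128i *)lanes, v);
    best = lanes[0];
    *idx = 0;
    for (i = 1; i < 4; ++i) {
      if (lanes[i] < best) {
        best = lanes[i];
        *idx = i;
      }
    }
  }
  return best;
}

The deleted function uses the same pattern, after first OR-ing a large value into the SAD of any clamped (out-of-range) candidate so that those lanes can never win the reduction.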
diff --git a/vp9/encoder/x86/vp9_frame_scale_ssse3.c b/vp9/encoder/x86/vp9_frame_scale_ssse3.c
index bf0e8b121..94506aad0 100644
--- a/vp9/encoder/x86/vp9_frame_scale_ssse3.c
+++ b/vp9/encoder/x86/vp9_frame_scale_ssse3.c
@@ -469,18 +469,18 @@ static void scale_plane_4_to_3_general(const uint8_t *src, const int src_stride,
// It's used to choose the src offset and filter coefficient offset.
const int offset_idx1 = (offset1_q4 >> 4) & 1;
const int offset_idx2 = (offset2_q4 >> 4) & 1;
- static const shuffle_filter_funcs shuffle_filter_funcs[2] = {
+ static const shuffle_filter_funcs kShuffleFilterFuncs[2] = {
shuffle_filter_ssse3, shuffle_filter_odd_ssse3
};
- static const convolve8_funcs convolve8_funcs[2] = {
+ static const convolve8_funcs kConvolve8Funcs[2] = {
convolve8_8_even_offset_ssse3, convolve8_8_odd_offset_ssse3
};
assert(w && h);
shuffle_filter_ssse3(coef[(phase_scaler + 0 * step_q4) & SUBPEL_MASK], f0);
- shuffle_filter_funcs[offset_idx1](coef[offset1_q4 & SUBPEL_MASK], f1);
- shuffle_filter_funcs[offset_idx2](coef[offset2_q4 & SUBPEL_MASK], f2);
+ kShuffleFilterFuncs[offset_idx1](coef[offset1_q4 & SUBPEL_MASK], f1);
+ kShuffleFilterFuncs[offset_idx2](coef[offset2_q4 & SUBPEL_MASK], f2);
// Sub 64 to avoid overflow.
// Coef 128 would be treated as -128 in PMADDUBSW. Sub 64 here.
@@ -522,11 +522,11 @@ static void scale_plane_4_to_3_general(const uint8_t *src, const int src_stride,
// 04 14 24 34 44 54 64 74
// 05 15 25 35 45 55 65 75
d[0] = convolve8_8_even_offset_ssse3(&s[0], f0);
- d[1] = convolve8_funcs[offset_idx1](&s[offset1_q4 >> 5], f1);
- d[2] = convolve8_funcs[offset_idx2](&s[offset2_q4 >> 5], f2);
+ d[1] = kConvolve8Funcs[offset_idx1](&s[offset1_q4 >> 5], f1);
+ d[2] = kConvolve8Funcs[offset_idx2](&s[offset2_q4 >> 5], f2);
d[3] = convolve8_8_even_offset_ssse3(&s[2], f0);
- d[4] = convolve8_funcs[offset_idx1](&s[2 + (offset1_q4 >> 5)], f1);
- d[5] = convolve8_funcs[offset_idx2](&s[2 + (offset2_q4 >> 5)], f2);
+ d[4] = kConvolve8Funcs[offset_idx1](&s[2 + (offset1_q4 >> 5)], f1);
+ d[5] = kConvolve8Funcs[offset_idx2](&s[2 + (offset2_q4 >> 5)], f2);
// 00 10 20 30 40 50 60 70 02 12 22 32 42 52 62 72
// 01 11 21 31 41 51 61 71 03 13 23 33 43 53 63 73
@@ -598,11 +598,11 @@ static void scale_plane_4_to_3_general(const uint8_t *src, const int src_stride,
loadu_8bit_16x4(t, stride_hor, &s[4]);
d[0] = convolve8_8_even_offset_ssse3(&s[0], f0);
- d[1] = convolve8_funcs[offset_idx1](&s[offset1_q4 >> 5], f1);
- d[2] = convolve8_funcs[offset_idx2](&s[offset2_q4 >> 5], f2);
+ d[1] = kConvolve8Funcs[offset_idx1](&s[offset1_q4 >> 5], f1);
+ d[2] = kConvolve8Funcs[offset_idx2](&s[offset2_q4 >> 5], f2);
d[3] = convolve8_8_even_offset_ssse3(&s[2], f0);
- d[4] = convolve8_funcs[offset_idx1](&s[2 + (offset1_q4 >> 5)], f1);
- d[5] = convolve8_funcs[offset_idx2](&s[2 + (offset2_q4 >> 5)], f2);
+ d[4] = kConvolve8Funcs[offset_idx1](&s[2 + (offset1_q4 >> 5)], f1);
+ d[5] = kConvolve8Funcs[offset_idx2](&s[2 + (offset2_q4 >> 5)], f2);
// 00 01 02 03 04 05 06 07 10 11 12 13 14 15 16 17
// 20 21 22 23 24 25 26 27 30 31 32 33 34 35 36 37
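Separate from the renames above, the "Sub 64 to avoid overflow" comment visible in the context lines refers to PMADDUBSW (_mm_maddubs_epi16) reading its second operand as signed bytes: a filter tap of 128 would be interpreted as -128 unless the taps are first biased into signed 8-bit range. A tiny stand-alone demonstration of that reinterpretation (illustrative only, not libvpx code):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint8_t tap = 128;
  /* PMADDUBSW treats the tap as signed, so 128 wraps to -128; biasing the
   * taps down by 64 keeps the largest one at 64, inside int8_t range. */
  printf("tap stored as 8-bit value : %d\n", tap);               /* 128 */
  printf("tap read as signed 8-bit  : %d\n", (int8_t)tap);       /* -128 */
  printf("tap biased down by 64     : %d\n", (int8_t)(tap - 64)); /* 64 */
  return 0;
}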
diff --git a/vp9/encoder/x86/vp9_quantize_avx2.c b/vp9/encoder/x86/vp9_quantize_avx2.c
index da285be8e..bf44b0867 100644
--- a/vp9/encoder/x86/vp9_quantize_avx2.c
+++ b/vp9/encoder/x86/vp9_quantize_avx2.c
@@ -16,6 +16,8 @@
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/x86/bitdepth_conversion_avx2.h"
#include "vpx_dsp/x86/quantize_sse2.h"
+#include "vp9/common/vp9_scan.h"
+#include "vp9/encoder/vp9_block.h"
// Zero fill 8 positions in the output buffer.
static VPX_FORCE_INLINE void store_zero_tran_low(tran_low_t *a) {
@@ -29,11 +31,13 @@ static VPX_FORCE_INLINE void store_zero_tran_low(tran_low_t *a) {
}
static VPX_FORCE_INLINE void load_fp_values_avx2(
- const int16_t *round_ptr, __m256i *round, const int16_t *quant_ptr,
- __m256i *quant, const int16_t *dequant_ptr, __m256i *dequant) {
- *round = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)round_ptr));
+ const struct macroblock_plane *mb_plane, __m256i *round, __m256i *quant,
+ const int16_t *dequant_ptr, __m256i *dequant) {
+ *round = _mm256_castsi128_si256(
+ _mm_load_si128((const __m128i *)mb_plane->round_fp));
*round = _mm256_permute4x64_epi64(*round, 0x54);
- *quant = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)quant_ptr));
+ *quant = _mm256_castsi128_si256(
+ _mm_load_si128((const __m128i *)mb_plane->quant_fp));
*quant = _mm256_permute4x64_epi64(*quant, 0x54);
*dequant =
_mm256_castsi128_si256(_mm_load_si128((const __m128i *)dequant_ptr));
@@ -98,13 +102,13 @@ static VPX_FORCE_INLINE void quantize_fp_16(
}
void vp9_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const int16_t *round_ptr, const int16_t *quant_ptr,
+ const struct macroblock_plane *const mb_plane,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ const struct ScanOrder *const scan_order) {
__m256i round, quant, dequant, thr;
__m256i eob_max = _mm256_setzero_si256();
- (void)scan;
+ const int16_t *iscan = scan_order->iscan;
coeff_ptr += n_coeffs;
iscan += n_coeffs;
@@ -113,8 +117,7 @@ void vp9_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
n_coeffs = -n_coeffs;
// Setup global values
- load_fp_values_avx2(round_ptr, &round, quant_ptr, &quant, dequant_ptr,
- &dequant);
+ load_fp_values_avx2(mb_plane, &round, &quant, dequant_ptr, &dequant);
thr = _mm256_setzero_si256();
quantize_fp_16(&round, &quant, &dequant, &thr, coeff_ptr + n_coeffs,
@@ -203,14 +206,13 @@ static VPX_FORCE_INLINE void quantize_fp_32x32_16(
}
void vp9_quantize_fp_32x32_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
+ const struct macroblock_plane *const mb_plane,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ const struct ScanOrder *const scan_order) {
__m256i round, quant, dequant, thr;
__m256i eob_max = _mm256_setzero_si256();
- (void)scan;
+ const int16_t *iscan = scan_order->iscan;
coeff_ptr += n_coeffs;
iscan += n_coeffs;
@@ -219,8 +221,7 @@ void vp9_quantize_fp_32x32_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
n_coeffs = -n_coeffs;
// Setup global values
- load_fp_values_avx2(round_ptr, &round, quant_ptr, &quant, dequant_ptr,
- &dequant);
+ load_fp_values_avx2(mb_plane, &round, &quant, dequant_ptr, &dequant);
thr = _mm256_srli_epi16(dequant, 2);
quant = _mm256_slli_epi16(quant, 1);
{
@@ -286,16 +287,17 @@ static VPX_FORCE_INLINE __m256i highbd_init_256(const int16_t *val_ptr) {
}
static VPX_FORCE_INLINE void highbd_load_fp_values(
- const int16_t *round_ptr, __m256i *round, const int16_t *quant_ptr,
- __m256i *quant, const int16_t *dequant_ptr, __m256i *dequant) {
- *round = highbd_init_256(round_ptr);
- *quant = highbd_init_256(quant_ptr);
+ const struct macroblock_plane *mb_plane, __m256i *round, __m256i *quant,
+ const int16_t *dequant_ptr, __m256i *dequant) {
+ *round = highbd_init_256(mb_plane->round_fp);
+ *quant = highbd_init_256(mb_plane->quant_fp);
*dequant = highbd_init_256(dequant_ptr);
}
static VPX_FORCE_INLINE __m256i highbd_get_max_lane_eob(
const int16_t *iscan_ptr, __m256i eobmax, __m256i nz_mask) {
- const __m256i packed_nz_mask = _mm256_packs_epi32(nz_mask, nz_mask);
+ const __m256i packed_nz_mask =
+ _mm256_packs_epi32(nz_mask, _mm256_setzero_si256());
const __m256i packed_nz_mask_perm =
_mm256_permute4x64_epi64(packed_nz_mask, 0xD8);
const __m256i iscan =
@@ -324,16 +326,15 @@ static VPX_FORCE_INLINE void highbd_quantize_fp(
}
void vp9_highbd_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
+ const struct macroblock_plane *const mb_plane,
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ const struct ScanOrder *const scan_order) {
const int step = 8;
__m256i round, quant, dequant;
__m256i eob_max = _mm256_setzero_si256();
- (void)scan;
+ const int16_t *iscan = scan_order->iscan;
coeff_ptr += n_coeffs;
iscan += n_coeffs;
@@ -342,8 +343,7 @@ void vp9_highbd_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
n_coeffs = -n_coeffs;
// Setup global values
- highbd_load_fp_values(round_ptr, &round, quant_ptr, &quant, dequant_ptr,
- &dequant);
+ highbd_load_fp_values(mb_plane, &round, &quant, dequant_ptr, &dequant);
highbd_quantize_fp(&round, &quant, &dequant, coeff_ptr + n_coeffs,
iscan + n_coeffs, qcoeff_ptr + n_coeffs,
@@ -390,14 +390,14 @@ static VPX_FORCE_INLINE void highbd_quantize_fp_32x32(
}
void vp9_highbd_quantize_fp_32x32_avx2(
- const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *round_ptr,
- const int16_t *quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan,
- const int16_t *iscan) {
+ const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ const struct macroblock_plane *const mb_plane, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const struct ScanOrder *const scan_order) {
const int step = 8;
__m256i round, quant, dequant, thr;
__m256i eob_max = _mm256_setzero_si256();
- (void)scan;
+ const int16_t *iscan = scan_order->iscan;
coeff_ptr += n_coeffs;
iscan += n_coeffs;
@@ -406,8 +406,7 @@ void vp9_highbd_quantize_fp_32x32_avx2(
n_coeffs = -n_coeffs;
// Setup global values
- highbd_load_fp_values(round_ptr, &round, quant_ptr, &quant, dequant_ptr,
- &dequant);
+ highbd_load_fp_values(mb_plane, &round, &quant, dequant_ptr, &dequant);
thr = _mm256_srli_epi32(dequant, 2);
// Subtracting 1 here eliminates a _mm256_cmpeq_epi32() instruction when
// calculating the zbin mask.
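The closing remark about subtracting 1 is the usual way to get a greater-than-or-equal compare out of AVX2, which only provides a signed greater-than for 32-bit lanes: testing abs_coeff >= thr directly would need an extra _mm256_cmpeq_epi32(), while testing against thr - 1 needs a single _mm256_cmpgt_epi32(). A minimal sketch of the biased compare for non-negative inputs (an illustrative helper, not code from the patch):

#include <immintrin.h>

/* All-ones mask in each 32-bit lane where a >= thr, assuming both inputs are
 * non-negative (illustrative helper, not code from the patch). */
static __m256i mask_ge_epi32(__m256i a, __m256i thr) {
  const __m256i thr_minus_1 = _mm256_sub_epi32(thr, _mm256_set1_epi32(1));
  return _mm256_cmpgt_epi32(a, thr_minus_1); /* a > thr - 1  <=>  a >= thr */
}

The comment's placement suggests the kernel folds the subtraction into thr once, outside the per-coefficient loop, so only the cmpgt remains on the hot path.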
diff --git a/vp9/encoder/x86/vp9_quantize_sse2.c b/vp9/encoder/x86/vp9_quantize_sse2.c
index c87723443..2481eb366 100644
--- a/vp9/encoder/x86/vp9_quantize_sse2.c
+++ b/vp9/encoder/x86/vp9_quantize_sse2.c
@@ -17,12 +17,14 @@
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
#include "vpx_dsp/x86/quantize_sse2.h"
+#include "vp9/common/vp9_scan.h"
+#include "vp9/encoder/vp9_block.h"
void vp9_quantize_fp_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const int16_t *round_ptr, const int16_t *quant_ptr,
+ const struct macroblock_plane *const mb_plane,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ const struct ScanOrder *const scan_order) {
const __m128i zero = _mm_setzero_si128();
__m128i thr;
int nzflag;
@@ -31,11 +33,10 @@ void vp9_quantize_fp_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
__m128i coeff0, coeff1, coeff0_sign, coeff1_sign;
__m128i qcoeff0, qcoeff1;
__m128i eob;
-
- (void)scan;
+ const int16_t *iscan = scan_order->iscan;
// Setup global values.
- load_fp_values(round_ptr, &round, quant_ptr, &quant, dequant_ptr, &dequant);
+ load_fp_values(mb_plane, &round, &quant, dequant_ptr, &dequant);
// Do DC and first 15 AC.
coeff0 = load_tran_low(coeff_ptr);
diff --git a/vp9/encoder/x86/vp9_quantize_ssse3.c b/vp9/encoder/x86/vp9_quantize_ssse3.c
index d35004e37..98decae74 100644
--- a/vp9/encoder/x86/vp9_quantize_ssse3.c
+++ b/vp9/encoder/x86/vp9_quantize_ssse3.c
@@ -17,12 +17,14 @@
#include "vpx_dsp/x86/bitdepth_conversion_sse2.h"
#include "vpx_dsp/x86/quantize_sse2.h"
#include "vpx_dsp/x86/quantize_ssse3.h"
+#include "vp9/common/vp9_scan.h"
+#include "vp9/encoder/vp9_block.h"
void vp9_quantize_fp_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const int16_t *round_ptr, const int16_t *quant_ptr,
+ const struct macroblock_plane *const mb_plane,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ const struct ScanOrder *const scan_order) {
const __m128i zero = _mm_setzero_si128();
__m128i thr;
int nzflag;
@@ -31,11 +33,10 @@ void vp9_quantize_fp_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
__m128i coeff0, coeff1;
__m128i qcoeff0, qcoeff1;
__m128i eob;
-
- (void)scan;
+ const int16_t *iscan = scan_order->iscan;
// Setup global values.
- load_fp_values(round_ptr, &round, quant_ptr, &quant, dequant_ptr, &dequant);
+ load_fp_values(mb_plane, &round, &quant, dequant_ptr, &dequant);
// Do DC and first 15 AC.
coeff0 = load_tran_low(coeff_ptr);
@@ -119,12 +120,11 @@ void vp9_quantize_fp_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
}
void vp9_quantize_fp_32x32_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
+ const struct macroblock_plane *const mb_plane,
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ const struct ScanOrder *const scan_order) {
const __m128i zero = _mm_setzero_si128();
const __m128i one_s16 = _mm_set1_epi16(1);
__m128i thr;
@@ -134,11 +134,10 @@ void vp9_quantize_fp_32x32_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
__m128i coeff0, coeff1;
__m128i qcoeff0, qcoeff1;
__m128i eob;
-
- (void)scan;
+ const int16_t *iscan = scan_order->iscan;
// Setup global values.
- load_fp_values(round_ptr, &round, quant_ptr, &quant, dequant_ptr, &dequant);
+ load_fp_values(mb_plane, &round, &quant, dequant_ptr, &dequant);
// The 32x32 halves round.
round = _mm_add_epi16(round, one_s16);
round = _mm_srli_epi16(round, 1);