author     Android Chromium Automerger <chromium-automerger@android>  2014-10-21 14:28:26 +0000
committer  Android Chromium Automerger <chromium-automerger@android>  2014-10-21 14:28:26 +0000
commit     7f1c3ffbeee3fa75cc21dbb183d8995145033a98 (patch)
tree       f7a23f1d5467c75526b3043349cfb2beed8de9d4
parent     92cea4480d6ca054b05e6ef0254cce03a28f2cdc (diff)
parent     5a09c3ef2aa3e6b6da4007746fdde04ca56dae7c (diff)
download   libyuv-7f1c3ffbeee3fa75cc21dbb183d8995145033a98.tar.gz
Merge third_party/libyuv from https://chromium.googlesource.com/external/libyuv.git at 5a09c3ef2aa3e6b6da4007746fdde04ca56dae7c
This commit was generated by merge_from_chromium.py.

Change-Id: I4e8f5ebb546970fb9d740a014d950f9be50d47a5
-rw-r--r--  BUILD.gn                      1
-rw-r--r--  README.chromium               2
-rw-r--r--  include/libyuv/row.h         32
-rw-r--r--  include/libyuv/version.h      2
-rw-r--r--  source/convert_from.cc        8
-rw-r--r--  source/planar_functions.cc   24
-rw-r--r--  source/row_any.cc             3
-rw-r--r--  source/row_win.cc            74
8 files changed, 114 insertions, 32 deletions
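
For context, the main functional change in this merge is a new AVX2 row kernel for I422-to-BGRA conversion, wired into the existing runtime CPU dispatch in I420ToBGRA and I422ToBGRA. The following is a minimal caller sketch (buffer setup and the function name ConvertSampleFrameToBGRA are illustrative, not part of this commit) showing the public entry point that now reaches the AVX2 path on capable CPUs:

#include <cstdint>
#include <vector>

#include "libyuv/convert_from.h"  // declares I420ToBGRA()

void ConvertSampleFrameToBGRA() {
  const int width = 64;   // multiple of 16, so whole rows can use the 16-pixel AVX2 kernel
  const int height = 2;
  std::vector<uint8_t> y(width * height, 128);                  // I420 Y plane
  std::vector<uint8_t> u((width / 2) * (height / 2), 128);      // I420 U plane
  std::vector<uint8_t> v((width / 2) * (height / 2), 128);      // I420 V plane
  std::vector<uint8_t> bgra(width * height * 4);                // BGRA output, 4 bytes/pixel

  // With this merge, an AVX2-capable CPU dispatches to I422ToBGRARow_AVX2;
  // otherwise the existing SSSE3, NEON or C row functions are used as before.
  libyuv::I420ToBGRA(y.data(), width,
                     u.data(), width / 2,
                     v.data(), width / 2,
                     bgra.data(), width * 4,
                     width, height);
}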
diff --git a/BUILD.gn b/BUILD.gn
index 8641bcd..1f84d38 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -84,7 +84,6 @@ source_set("libyuv") {
}
deps = [
- "//ppapi:ppapi_macros", # Allow include of pp_macros.h.
"//third_party:jpeg",
]
diff --git a/README.chromium b/README.chromium
index 600d33f..90ff5c2 100644
--- a/README.chromium
+++ b/README.chromium
@@ -1,6 +1,6 @@
Name: libyuv
URL: http://code.google.com/p/libyuv/
-Version: 1125
+Version: 1130
License: BSD
License File: LICENSE
diff --git a/include/libyuv/row.h b/include/libyuv/row.h
index 07d3875..058122b 100644
--- a/include/libyuv/row.h
+++ b/include/libyuv/row.h
@@ -15,10 +15,6 @@
#include "libyuv/basic_types.h"
-#if defined(__native_client__)
-#include "ppapi/c/pp_macros.h" // For PPAPI_RELEASE
-#endif
-
#ifdef __cplusplus
namespace libyuv {
extern "C" {
@@ -51,13 +47,6 @@ extern "C" {
#define LIBYUV_SSSE3_ONLY
#endif
-// Enable for NaCL pepper 33 for bundle and AVX2 support.
-#if defined(__native_client__) && PPAPI_RELEASE >= 33
-#define NEW_BINUTILS
-#endif
-#if defined(__native_client__) && defined(__arm__) && PPAPI_RELEASE < 37
-#define LIBYUV_DISABLE_NEON
-#endif
// clang >= 3.5.0 required for Arm64.
#if defined(__clang__) && defined(__aarch64__) && !defined(LIBYUV_DISABLE_NEON)
#if (__clang_major__ < 3) || (__clang_major__ == 3 && (__clang_minor__ < 5))
@@ -65,7 +54,6 @@ extern "C" {
#endif // clang >= 3.5
#endif // __clang__
-
// The following are available on all x86 platforms:
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
@@ -212,6 +200,7 @@ extern "C" {
#define HAS_ARGBTOYJROW_AVX2
#define HAS_ARGBTOYROW_AVX2
#define HAS_I422TOARGBROW_AVX2
+#define HAS_I422TOBGRAROW_AVX2
#define HAS_INTERPOLATEROW_AVX2
#define HAS_MERGEUVROW_AVX2
#define HAS_MIRRORROW_AVX2
@@ -496,24 +485,15 @@ typedef uint8 uvec8[16];
#endif
// NaCL macros for GCC x86 and x64.
-
-// TODO(nfullagar): When pepper_33 toolchain is distributed, default to
-// NEW_BINUTILS and remove all BUNDLEALIGN occurances.
#if defined(__native_client__)
#define LABELALIGN ".p2align 5\n"
#else
#define LABELALIGN ".p2align 2\n"
#endif
#if defined(__native_client__) && defined(__x86_64__)
-#if defined(NEW_BINUTILS)
#define BUNDLELOCK ".bundle_lock\n"
#define BUNDLEUNLOCK ".bundle_unlock\n"
#define BUNDLEALIGN "\n"
-#else
-#define BUNDLELOCK "\n"
-#define BUNDLEUNLOCK "\n"
-#define BUNDLEALIGN ".p2align 5\n"
-#endif
#define MEMACCESS(base) "%%nacl:(%%r15,%q" #base ")"
#define MEMACCESS2(offset, base) "%%nacl:" #offset "(%%r15,%q" #base ")"
#define MEMLEA(offset, base) #offset "(%q" #base ")"
@@ -1055,6 +1035,11 @@ void I422ToARGBRow_AVX2(const uint8* src_y,
const uint8* src_v,
uint8* dst_argb,
int width);
+void I422ToBGRARow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
void I444ToARGBRow_SSSE3(const uint8* src_y,
const uint8* src_u,
const uint8* src_v,
@@ -1137,6 +1122,11 @@ void I422ToARGBRow_Any_AVX2(const uint8* src_y,
const uint8* src_v,
uint8* dst_argb,
int width);
+void I422ToBGRARow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
void I444ToARGBRow_Any_SSSE3(const uint8* src_y,
const uint8* src_u,
const uint8* src_v,
diff --git a/include/libyuv/version.h b/include/libyuv/version.h
index 349b523..2e34fc8 100644
--- a/include/libyuv/version.h
+++ b/include/libyuv/version.h
@@ -11,6 +11,6 @@
#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
#define INCLUDE_LIBYUV_VERSION_H_
-#define LIBYUV_VERSION 1125
+#define LIBYUV_VERSION 1130
#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
diff --git a/source/convert_from.cc b/source/convert_from.cc
index 2220005..e044e03 100644
--- a/source/convert_from.cc
+++ b/source/convert_from.cc
@@ -551,6 +551,14 @@ int I420ToBGRA(const uint8* src_y, int src_stride_y,
}
}
#endif
+#if defined(HAS_I422TOBGRAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2) && width >= 16) {
+ I422ToBGRARow = I422ToBGRARow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToBGRARow = I422ToBGRARow_AVX2;
+ }
+ }
+#endif
#if defined(HAS_I422TOBGRAROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
I422ToBGRARow = I422ToBGRARow_Any_NEON;
diff --git a/source/planar_functions.cc b/source/planar_functions.cc
index 661cad9..b21192b 100644
--- a/source/planar_functions.cc
+++ b/source/planar_functions.cc
@@ -793,14 +793,6 @@ int I422ToBGRA(const uint8* src_y, int src_stride_y,
height = 1;
src_stride_y = src_stride_u = src_stride_v = dst_stride_bgra = 0;
}
-#if defined(HAS_I422TOBGRAROW_NEON)
- if (TestCpuFlag(kCpuHasNEON)) {
- I422ToBGRARow = I422ToBGRARow_Any_NEON;
- if (IS_ALIGNED(width, 16)) {
- I422ToBGRARow = I422ToBGRARow_NEON;
- }
- }
-#endif
#if defined(HAS_I422TOBGRAROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) && width >= 8) {
I422ToBGRARow = I422ToBGRARow_Any_SSSE3;
@@ -809,6 +801,22 @@ int I422ToBGRA(const uint8* src_y, int src_stride_y,
}
}
#endif
+#if defined(HAS_I422TOBGRAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2) && width >= 16) {
+ I422ToBGRARow = I422ToBGRARow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToBGRARow = I422ToBGRARow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOBGRAROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+ I422ToBGRARow = I422ToBGRARow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToBGRARow = I422ToBGRARow_NEON;
+ }
+ }
+#endif
#if defined(HAS_I422TOBGRAROW_MIPS_DSPR2)
if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
diff --git a/source/row_any.cc b/source/row_any.cc
index 9d8a5e5..b1ede4e 100644
--- a/source/row_any.cc
+++ b/source/row_any.cc
@@ -65,6 +65,9 @@ YANY(I422ToUYVYRow_Any_SSE2, I422ToUYVYRow_SSE2, I422ToUYVYRow_C, 1, 2, 15)
#ifdef HAS_I422TOARGBROW_AVX2
YANY(I422ToARGBRow_Any_AVX2, I422ToARGBRow_AVX2, I422ToARGBRow_C, 1, 4, 15)
#endif // HAS_I422TOARGBROW_AVX2
+#ifdef HAS_I422TOBGRAROW_AVX2
+YANY(I422ToBGRARow_Any_AVX2, I422ToBGRARow_AVX2, I422ToBGRARow_C, 1, 4, 15)
+#endif // HAS_I422TOBGRAROW_AVX2
#ifdef HAS_I422TOARGBROW_NEON
YANY(I444ToARGBRow_Any_NEON, I444ToARGBRow_NEON, I444ToARGBRow_C, 0, 4, 7)
YANY(I422ToARGBRow_Any_NEON, I422ToARGBRow_NEON, I422ToARGBRow_C, 1, 4, 7)
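
For orientation, the _Any_ variants registered here wrap a SIMD row function and a C row function: the SIMD kernel covers the largest multiple of (MASK + 1) pixels and the C kernel finishes the remainder. A rough sketch of what the new YANY entry is assumed to expand to (a reading of the macro arguments 1, 4, 15 above, not the literal macro body from this commit):

// Assumed expansion of YANY(I422ToBGRARow_Any_AVX2, I422ToBGRARow_AVX2,
// I422ToBGRARow_C, 1, 4, 15) -- illustrative sketch only.
void I422ToBGRARow_Any_AVX2(const uint8* src_y, const uint8* src_u,
                            const uint8* src_v, uint8* dst_argb, int width) {
  int n = width & ~15;               // pixels the 16-wide AVX2 kernel can cover
  I422ToBGRARow_AVX2(src_y, src_u, src_v, dst_argb, n);
  I422ToBGRARow_C(src_y + n,
                  src_u + (n >> 1),  // UV_SHIFT = 1: one U/V sample per 2 pixels
                  src_v + (n >> 1),
                  dst_argb + n * 4,  // BPP = 4 bytes per BGRA pixel
                  width & 15);       // leftover pixels handled by the C row
}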
diff --git a/source/row_win.cc b/source/row_win.cc
index 969d78c..d0a1059 100644
--- a/source/row_win.cc
+++ b/source/row_win.cc
@@ -1600,6 +1600,80 @@ void I422ToARGBRow_AVX2(const uint8* y_buf,
ret
}
}
+
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes).
+// TODO(fbarchard): Use macros to reduce duplicate code. See SSSE3.
+__declspec(naked) __declspec(align(16))
+void I422ToBGRARow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+ vpxor ymm4, ymm4, ymm4
+
+ align 4
+ convertloop:
+ vmovq xmm0, qword ptr [esi] // U
+ vmovq xmm1, qword ptr [esi + edi] // V
+ lea esi, [esi + 8]
+ vpunpcklbw ymm0, ymm0, ymm1 // UV
+ vpermq ymm0, ymm0, 0xd8
+ vpunpcklwd ymm0, ymm0, ymm0 // UVUV
+ vpmaddubsw ymm2, ymm0, kUVToB_AVX // scale B UV
+ vpmaddubsw ymm1, ymm0, kUVToG_AVX // scale G UV
+ vpmaddubsw ymm0, ymm0, kUVToR_AVX // scale R UV
+ vpsubw ymm2, ymm2, kUVBiasB_AVX // unbias back to signed
+ vpsubw ymm1, ymm1, kUVBiasG_AVX
+ vpsubw ymm0, ymm0, kUVBiasR_AVX
+
+ // Step 2: Find Y contribution to 16 R,G,B values
+ vmovdqu xmm3, [eax] // NOLINT
+ lea eax, [eax + 16]
+ vpermq ymm3, ymm3, 0xd8
+ vpunpcklbw ymm3, ymm3, ymm4
+ vpsubsw ymm3, ymm3, kYSub16_AVX
+ vpmullw ymm3, ymm3, kYToRgb_AVX
+ vpaddsw ymm2, ymm2, ymm3 // B += Y
+ vpaddsw ymm1, ymm1, ymm3 // G += Y
+ vpaddsw ymm0, ymm0, ymm3 // R += Y
+ vpsraw ymm2, ymm2, 6
+ vpsraw ymm1, ymm1, 6
+ vpsraw ymm0, ymm0, 6
+// TODO(fbarchard): Switch register order to match SSSE3.
+ vpackuswb ymm2, ymm2, ymm2 // B
+ vpackuswb ymm1, ymm1, ymm1 // G
+ vpackuswb ymm0, ymm0, ymm0 // R
+
+ // Step 3: Weave into BGRA
+ vpunpcklbw ymm1, ymm1, ymm2 // GB
+ vpermq ymm1, ymm1, 0xd8
+ vpunpcklbw ymm0, ymm5, ymm0 // AR
+ vpermq ymm0, ymm0, 0xd8
+ vpunpcklwd ymm2, ymm0, ymm1 // ARGB first 8 pixels
+ vpunpckhwd ymm0, ymm0, ymm1 // ARGB next 8 pixels
+ vmovdqu [edx], ymm2
+ vmovdqu [edx + 32], ymm0
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+ vzeroupper
+
+ pop edi
+ pop esi
+ ret
+ }
+}
#endif // HAS_I422TOARGBROW_AVX2
#ifdef HAS_I422TOARGBROW_SSSE3