path: root/src/dsp
author     Derek Sollenberger <djsollen@google.com>    2013-02-07 18:38:54 +0000
committer  Android (Google) Code Review <android-gerrit@google.com>    2013-02-07 18:38:54 +0000
commit     b6dbce6bfeaabde2a7b581c4c6888d532d32f3ac (patch)
tree       1dd0a6f6cc091618520b48d4127d62c8c880e1d0 /src/dsp
parent     4b2196c929b70f2cdc1c2556580d349db89356d8 (diff)
download   webp-b6dbce6bfeaabde2a7b581c4c6888d532d32f3ac.tar.gz
Revert "Sync libwebp with head#I6ecefe33"
This reverts commit 4b2196c929b70f2cdc1c2556580d349db89356d8.

Change-Id: I3c026866c336663666cb5a2e9e34ecffd1f05595
Diffstat (limited to 'src/dsp')
-rw-r--r--  src/dsp/cpu-features.c     | 396
-rw-r--r--  src/dsp/cpu-features.h     |  56
-rw-r--r--  src/dsp/cpu.c              |  28
-rw-r--r--  src/dsp/dec_neon.c         |  88
-rw-r--r--  src/dsp/dec_sse2.c         |  37
-rw-r--r--  src/dsp/dsp.h              |  23
-rw-r--r--  src/dsp/enc.c              | 117
-rw-r--r--  src/dsp/enc_neon.c         | 661
-rw-r--r--  src/dsp/enc_sse2.c         | 321
-rw-r--r--  src/dsp/lossless.c         |  27
-rw-r--r--  src/dsp/upsampling.c       |  10
-rw-r--r--  src/dsp/upsampling_neon.c  | 292
-rw-r--r--  src/dsp/upsampling_sse2.c  |  38
-rw-r--r--  src/dsp/yuv.c              |  15
-rw-r--r--  src/dsp/yuv.h              | 109
15 files changed, 287 insertions, 1931 deletions
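
For orientation before the per-file hunks: libwebp selects its SIMD implementations at run time through a single function pointer, VP8GetCPUInfo, which the platform files below (cpu.c, *_sse2.c, *_neon.c) consult before installing their variants. A minimal, self-contained sketch of that dispatch pattern (hypothetical stand-in code, not part of this change; the real enum and pointer live in dsp.h):

    /* dispatch_sketch.c -- illustrative only, not part of this diff */
    #include <stdio.h>

    typedef enum { kSSE2, kNEON } CPUFeature;
    typedef int (*VP8CPUInfo)(CPUFeature feature);

    /* Conservative default: report that no SIMD extension is present. */
    static int NoSIMD(CPUFeature feature) {
      (void)feature;
      return 0;
    }

    VP8CPUInfo VP8GetCPUInfo = NoSIMD;  /* platform files overwrite this */

    int main(void) {
      if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON)) {
        printf("installing NEON code paths\n");
      } else {
        printf("keeping plain-C code paths\n");
      }
      return 0;
    }
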
diff --git a/src/dsp/cpu-features.c b/src/dsp/cpu-features.c
deleted file mode 100644
index 6a5cd8f1..00000000
--- a/src/dsp/cpu-features.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <sys/system_properties.h>
-#ifdef __arm__
-#include <machine/cpu-features.h>
-#endif
-#include <pthread.h>
-#include "cpu-features.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>  /* for strlen(), memcmp(), memcpy(), memchr() */
-#include <unistd.h>  /* for read(), close() */
-#include <fcntl.h>
-#include <errno.h>
-
-static pthread_once_t g_once = PTHREAD_ONCE_INIT;
-static AndroidCpuFamily g_cpuFamily;
-static uint64_t g_cpuFeatures;
-static int g_cpuCount;
-
-static const int android_cpufeatures_debug = 0;
-
-#ifdef __arm__
-# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_ARM
-#elif defined __i386__
-# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_X86
-#else
-# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_UNKNOWN
-#endif
-
-#define D(...) \
- do { \
- if (android_cpufeatures_debug) { \
- printf(__VA_ARGS__); fflush(stdout); \
- } \
- } while (0)
-
-#ifdef __i386__
-static __inline__ void x86_cpuid(int func, int values[4])
-{
- int a, b, c, d;
- /* We need to preserve ebx since we're compiling PIC code */
- /* this means we can't use "=b" for the second output register */
- __asm__ __volatile__ ( \
- "push %%ebx\n"
- "cpuid\n" \
- "mov %1, %%ebx\n"
- "pop %%ebx\n"
- : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
- : "a" (func) \
- );
- values[0] = a;
- values[1] = b;
- values[2] = c;
- values[3] = d;
-}
-#endif
-
-/* Read the content of /proc/cpuinfo into a user-provided buffer.
- * Return the length of the data, or -1 on error. Does *not*
- * zero-terminate the content. Will not read more
- * than 'buffsize' bytes.
- */
-static int
-read_file(const char* pathname, char* buffer, size_t buffsize)
-{
- int fd, len;
-
- fd = open(pathname, O_RDONLY);
- if (fd < 0)
- return -1;
-
- do {
- len = read(fd, buffer, buffsize);
- } while (len < 0 && errno == EINTR);
-
- close(fd);
-
- return len;
-}
-
-/* Extract the content of the first occurrence of a given field in
- * the content of /proc/cpuinfo and return it as a heap-allocated
- * string that must be freed by the caller.
- *
- * Return NULL if not found.
- */
-static char*
-extract_cpuinfo_field(char* buffer, int buflen, const char* field)
-{
- int fieldlen = strlen(field);
- char* bufend = buffer + buflen;
- char* result = NULL;
- int len;
- const char *p, *q;
-
- /* Look for the first field occurrence, and ensure it starts a line.
- */
- p = buffer;
- for (;;) {
- p = memmem(p, bufend-p, field, fieldlen);
- if (p == NULL)
- goto EXIT;
-
- if (p == buffer || p[-1] == '\n')
- break;
-
- p += fieldlen;
- }
-
- /* Skip to the first colon, which must be followed by a space */
- p += fieldlen;
- p = memchr(p, ':', bufend-p);
- if (p == NULL || p[1] != ' ')
- goto EXIT;
-
- /* Find the end of the line */
- p += 2;
- q = memchr(p, '\n', bufend-p);
- if (q == NULL)
- q = bufend;
-
- /* Copy the line into a heap-allocated buffer */
- len = q-p;
- result = malloc(len+1);
- if (result == NULL)
- goto EXIT;
-
- memcpy(result, p, len);
- result[len] = '\0';
-
-EXIT:
- return result;
-}
-
-/* Count the number of occurrences of a given field prefix in /proc/cpuinfo.
- */
-static int
-count_cpuinfo_field(char* buffer, int buflen, const char* field)
-{
- int fieldlen = strlen(field);
- const char* p = buffer;
- const char* bufend = buffer + buflen;
- int count = 0;
-
- for (;;) {
- const char* q;
-
- p = memmem(p, bufend-p, field, fieldlen);
- if (p == NULL)
- break;
-
- /* Ensure that the field is at the start of a line */
- if (p > buffer && p[-1] != '\n') {
- p += fieldlen;
- continue;
- }
-
- /* skip any whitespace */
- q = p + fieldlen;
- while (q < bufend && (*q == ' ' || *q == '\t'))
- q++;
-
- /* we must have a colon now */
- if (q < bufend && *q == ':') {
- count += 1;
- q ++;
- }
- p = q;
- }
-
- return count;
-}
-
-/* Like strlen(), but for constant string literals */
-#define STRLEN_CONST(x) (sizeof(x) - 1)
-
-
-/* Checks that a space-separated list of items contains one given 'item'.
- * Returns 1 if found, 0 otherwise.
- */
-static int
-has_list_item(const char* list, const char* item)
-{
- const char* p = list;
- int itemlen = strlen(item);
-
- if (list == NULL)
- return 0;
-
- while (*p) {
- const char* q;
-
- /* skip spaces */
- while (*p == ' ' || *p == '\t')
- p++;
-
- /* find end of current list item */
- q = p;
- while (*q && *q != ' ' && *q != '\t')
- q++;
-
- if (itemlen == q-p && !memcmp(p, item, itemlen))
- return 1;
-
- /* skip to next item */
- p = q;
- }
- return 0;
-}
-
-
-static void
-android_cpuInit(void)
-{
- char cpuinfo[4096];
- int cpuinfo_len;
-
- g_cpuFamily = DEFAULT_CPU_FAMILY;
- g_cpuFeatures = 0;
- g_cpuCount = 1;
-
- cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, sizeof cpuinfo);
- D("cpuinfo_len is (%d):\n%.*s\n", cpuinfo_len,
- cpuinfo_len >= 0 ? cpuinfo_len : 0, cpuinfo);
-
- if (cpuinfo_len < 0) /* should not happen */ {
- return;
- }
-
- /* Count the CPU cores; the value may be 0 for single-core CPUs. */
- g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "processor");
- if (g_cpuCount == 0) {
- g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "Processor");
- if (g_cpuCount == 0) {
- g_cpuCount = 1;
- }
- }
-
- D("found cpuCount = %d\n", g_cpuCount);
-
-#ifdef __ARM_ARCH__
- {
- char* features = NULL;
- char* architecture = NULL;
-
- /* Extract the architecture from the "CPU architecture" field.
- * The list is well-known, unlike the output of
- * the 'Processor' field which can vary greatly.
- *
- * See the definition of the 'proc_arch' array in
- * $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
- * same file.
- */
- char* cpuArch = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "CPU architecture");
-
- if (cpuArch != NULL) {
- char* end;
- long archNumber;
- int hasARMv7 = 0;
-
- D("found cpuArch = '%s'\n", cpuArch);
-
- /* read the initial decimal number, ignore the rest */
- archNumber = strtol(cpuArch, &end, 10);
-
- /* Here we assume that ARMv8 will be upwards compatible with v7
- * in the future. Unfortunately, there is no 'Features' field to
- * indicate that Thumb-2 is supported.
- */
- if (end > cpuArch && archNumber >= 7) {
- hasARMv7 = 1;
- }
-
- /* Unfortunately, it seems that certain ARMv6-based CPUs
- * report an incorrect architecture number of 7!
- *
- * See http://code.google.com/p/android/issues/detail?id=10812
- *
- * We try to correct this by looking at the 'elf_format'
- * field reported by the 'Processor' field, which is of the
- * form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
- * an ARMv6-one.
- */
- if (hasARMv7) {
- char* cpuProc = extract_cpuinfo_field(cpuinfo, cpuinfo_len,
- "Processor");
- if (cpuProc != NULL) {
- D("found cpuProc = '%s'\n", cpuProc);
- if (has_list_item(cpuProc, "(v6l)")) {
- D("CPU processor and architecture mismatch!!\n");
- hasARMv7 = 0;
- }
- free(cpuProc);
- }
- }
-
- if (hasARMv7) {
- g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_ARMv7;
- }
-
- /* The LDREX / STREX instructions are available from ARMv6 */
- if (archNumber >= 6) {
- g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_LDREX_STREX;
- }
-
- free(cpuArch);
- }
-
- /* Extract the list of CPU features from 'Features' field */
- char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features");
-
- if (cpuFeatures != NULL) {
-
- D("found cpuFeatures = '%s'\n", cpuFeatures);
-
- if (has_list_item(cpuFeatures, "vfpv3"))
- g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
-
- else if (has_list_item(cpuFeatures, "vfpv3d16"))
- g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
-
- if (has_list_item(cpuFeatures, "neon")) {
- /* Note: Certain kernels only report neon but not vfpv3
- * in their features list. However, ARM mandates
- * that if Neon is implemented, so must be VFPv3
- * so always set the flag.
- */
- g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_NEON |
- ANDROID_CPU_ARM_FEATURE_VFPv3;
- }
- free(cpuFeatures);
- }
- }
-#endif /* __ARM_ARCH__ */
-
-#ifdef __i386__
- g_cpuFamily = ANDROID_CPU_FAMILY_X86;
-
- int regs[4];
-
-/* According to http://en.wikipedia.org/wiki/CPUID */
-#define VENDOR_INTEL_b 0x756e6547
-#define VENDOR_INTEL_c 0x6c65746e
-#define VENDOR_INTEL_d 0x49656e69
-
- x86_cpuid(0, regs);
- int vendorIsIntel = (regs[1] == VENDOR_INTEL_b &&
- regs[2] == VENDOR_INTEL_c &&
- regs[3] == VENDOR_INTEL_d);
-
- x86_cpuid(1, regs);
- if ((regs[2] & (1 << 9)) != 0) {
- g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_SSSE3;
- }
- if ((regs[2] & (1 << 23)) != 0) {
- g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_POPCNT;
- }
- if (vendorIsIntel && (regs[2] & (1 << 22)) != 0) {
- g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_MOVBE;
- }
-#endif
-}
-
-
-AndroidCpuFamily
-android_getCpuFamily(void)
-{
- pthread_once(&g_once, android_cpuInit);
- return g_cpuFamily;
-}
-
-
-uint64_t
-android_getCpuFeatures(void)
-{
- pthread_once(&g_once, android_cpuInit);
- return g_cpuFeatures;
-}
-
-
-int
-android_getCpuCount(void)
-{
- pthread_once(&g_once, android_cpuInit);
- return g_cpuCount;
-}
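
The three accessors above (android_getCpuFamily, android_getCpuFeatures, android_getCpuCount) are the whole public surface of this bundled helper. A hedged usage sketch, assuming the companion header below is on the include path:

    /* check_neon.c -- illustrative caller, not part of this diff */
    #include <stdio.h>
    #include "cpu-features.h"

    int main(void) {
      /* The pthread_once() inside the accessors makes them safe to call in
       * any order; the first call parses /proc/cpuinfo exactly once. */
      if (android_getCpuFamily() == ANDROID_CPU_FAMILY_ARM &&
          (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0) {
        printf("NEON available, %d core(s)\n", android_getCpuCount());
      } else {
        printf("no NEON support detected\n");
      }
      return 0;
    }
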
diff --git a/src/dsp/cpu-features.h b/src/dsp/cpu-features.h
deleted file mode 100644
index f20c0bc4..00000000
--- a/src/dsp/cpu-features.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-// You can download Android source at
-// http://source.android.com/source/downloading.html
-// Original files are in ndk/sources/android/cpufeatures
-// Revision is Change-Id: I9a0629efba36a6023f05e5f092e7addcc1b7d2a9
-
-#ifndef CPU_FEATURES_H
-#define CPU_FEATURES_H
-
-#include <sys/cdefs.h>
-#include <stdint.h>
-
-__BEGIN_DECLS
-
-typedef enum {
- ANDROID_CPU_FAMILY_UNKNOWN = 0,
- ANDROID_CPU_FAMILY_ARM,
- ANDROID_CPU_FAMILY_X86,
-
- ANDROID_CPU_FAMILY_MAX /* do not remove */
-
-} AndroidCpuFamily;
-
-/* Return family of the device's CPU */
-extern AndroidCpuFamily android_getCpuFamily(void);
-
-enum {
- ANDROID_CPU_ARM_FEATURE_ARMv7 = (1 << 0),
- ANDROID_CPU_ARM_FEATURE_VFPv3 = (1 << 1),
- ANDROID_CPU_ARM_FEATURE_NEON = (1 << 2),
- ANDROID_CPU_ARM_FEATURE_LDREX_STREX = (1 << 3),
-};
-
-enum {
- ANDROID_CPU_X86_FEATURE_SSSE3 = (1 << 0),
- ANDROID_CPU_X86_FEATURE_POPCNT = (1 << 1),
- ANDROID_CPU_X86_FEATURE_MOVBE = (1 << 2),
-};
-
-extern uint64_t android_getCpuFeatures(void);
-
-/* Return the number of CPU cores detected on this device. */
-extern int android_getCpuCount(void);
-
-__END_DECLS
-
-#endif /* CPU_FEATURES_H */
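
Since the feature constants above are independent bit flags, several capabilities can be tested with a single mask. A small sketch under the same header (the helper name is hypothetical):

    #include <stdint.h>
    #include "cpu-features.h"

    /* Returns 1 iff the CPU advertises both ARMv7 and NEON. */
    static int HasV7Neon(void) {
      const uint64_t features = android_getCpuFeatures();
      const uint64_t want = ANDROID_CPU_ARM_FEATURE_ARMv7 |
                            ANDROID_CPU_ARM_FEATURE_NEON;
      return (features & want) == want;  /* both bits must be set */
    }
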
diff --git a/src/dsp/cpu.c b/src/dsp/cpu.c
index bf9ae0c7..2ee7812d 100644
--- a/src/dsp/cpu.c
+++ b/src/dsp/cpu.c
@@ -11,9 +11,9 @@
#include "./dsp.h"
-#if defined(__ANDROID__)
-#include "./cpu-features.h"
-#endif
+//#if defined(__ANDROID__)
+//#include <cpu-features.h>
+//#endif
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
@@ -57,17 +57,17 @@ static int x86CPUInfo(CPUFeature feature) {
return 0;
}
VP8CPUInfo VP8GetCPUInfo = x86CPUInfo;
-#elif defined(WEBP_ANDROID_NEON)
-static int AndroidCPUInfo(CPUFeature feature) {
- const AndroidCpuFamily cpu_family = android_getCpuFamily();
- const uint64_t cpu_features = android_getCpuFeatures();
- if (feature == kNEON) {
- return (cpu_family == ANDROID_CPU_FAMILY_ARM &&
- 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON));
- }
- return 0;
-}
-VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo;
+//#elif defined(WEBP_ANDROID_NEON)
+//static int AndroidCPUInfo(CPUFeature feature) {
+// const AndroidCpuFamily cpu_family = android_getCpuFamily();
+// const uint64_t cpu_features = android_getCpuFeatures();
+// if (feature == kNEON) {
+// return (cpu_family == ANDROID_CPU_FAMILY_ARM &&
+// 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON));
+// }
+// return 0;
+//}
+//VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo;
#elif defined(__ARM_NEON__)
// define a dummy function to enable turning off NEON at runtime by setting
// VP8DecGetCPUInfo = NULL
diff --git a/src/dsp/dec_neon.c b/src/dsp/dec_neon.c
index 5d7cff15..ec824b79 100644
--- a/src/dsp/dec_neon.c
+++ b/src/dsp/dec_neon.c
@@ -12,14 +12,14 @@
#include "./dsp.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
#if defined(WEBP_USE_NEON)
#include "../dec/vp8i.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#define QRegs "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
@@ -155,9 +155,6 @@ static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
}
}
-//-----------------------------------------------------------------------------
-// Inverse transforms (Paragraph 14.4)
-
static void TransformOneNEON(const int16_t *in, uint8_t *dst) {
const int kBPS = BPS;
const int16_t constants[] = {20091, 17734, 0, 0};
@@ -314,92 +311,19 @@ static void TransformTwoNEON(const int16_t* in, uint8_t* dst, int do_two) {
}
}
-static void TransformWHT(const int16_t* in, int16_t* out) {
- const int kStep = 32; // The store is only incrementing the pointer as if we
- // had stored a single byte.
- __asm__ volatile (
- // part 1
- // load data into q0, q1
- "vld1.16 {q0, q1}, [%[in]] \n"
-
- "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
- "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
- "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
- "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
-
- "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
- "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
- "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
- "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
-
- // Transpose
- // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
- // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
- "vswp d1, d4 \n" // vtrn.64 q0, q2
- "vswp d3, d6 \n" // vtrn.64 q1, q3
- "vtrn.32 q0, q1 \n"
- "vtrn.32 q2, q3 \n"
-
- "vmov.s32 q4, #3 \n" // dc = 3
- "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
- "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
- "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
- "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
- "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
-
- "vadd.s32 q0, q6, q7 \n"
- "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
- "vadd.s32 q1, q9, q8 \n"
- "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
- "vsub.s32 q2, q6, q7 \n"
- "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
- "vsub.s32 q3, q9, q8 \n"
- "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
-
- // set the results to output
- "vst1.16 d0[0], [%[out]], %[kStep] \n"
- "vst1.16 d1[0], [%[out]], %[kStep] \n"
- "vst1.16 d2[0], [%[out]], %[kStep] \n"
- "vst1.16 d3[0], [%[out]], %[kStep] \n"
- "vst1.16 d0[1], [%[out]], %[kStep] \n"
- "vst1.16 d1[1], [%[out]], %[kStep] \n"
- "vst1.16 d2[1], [%[out]], %[kStep] \n"
- "vst1.16 d3[1], [%[out]], %[kStep] \n"
- "vst1.16 d0[2], [%[out]], %[kStep] \n"
- "vst1.16 d1[2], [%[out]], %[kStep] \n"
- "vst1.16 d2[2], [%[out]], %[kStep] \n"
- "vst1.16 d3[2], [%[out]], %[kStep] \n"
- "vst1.16 d0[3], [%[out]], %[kStep] \n"
- "vst1.16 d1[3], [%[out]], %[kStep] \n"
- "vst1.16 d2[3], [%[out]], %[kStep] \n"
- "vst1.16 d3[3], [%[out]], %[kStep] \n"
-
- : [out] "+r"(out) // modified registers
- : [in] "r"(in), [kStep] "r"(kStep) // constants
- : "memory", "q0", "q1", "q2", "q3", "q4",
- "q5", "q6", "q7", "q8", "q9" // clobbered
- );
-}
-
-#endif // WEBP_USE_NEON
-
-//------------------------------------------------------------------------------
-// Entry point
-
extern void VP8DspInitNEON(void);
void VP8DspInitNEON(void) {
-#if defined(WEBP_USE_NEON)
VP8Transform = TransformTwoNEON;
- VP8TransformWHT = TransformWHT;
VP8SimpleVFilter16 = SimpleVFilter16NEON;
VP8SimpleHFilter16 = SimpleHFilter16NEON;
VP8SimpleVFilter16i = SimpleVFilter16iNEON;
VP8SimpleHFilter16i = SimpleHFilter16iNEON;
-#endif // WEBP_USE_NEON
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
+
+#endif // WEBP_USE_NEON
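
For reference, a scalar equivalent of the NEON inverse WHT deleted above, reconstructed from the asm comments (a sketch, not code from the file; the 32-byte store step corresponds to advancing 'out' by 16 int16_t elements per coefficient):

    #include <stdint.h>

    static void TransformWHT_C(const int16_t* in, int16_t* out) {
      int tmp[16];
      int i;
      for (i = 0; i < 4; ++i) {   /* vertical pass, matching "part 1" */
        const int a0 = in[0 + i] + in[12 + i];
        const int a1 = in[4 + i] + in[ 8 + i];
        const int a2 = in[4 + i] - in[ 8 + i];
        const int a3 = in[0 + i] - in[12 + i];
        tmp[0  + i] = a0 + a1;
        tmp[8  + i] = a0 - a1;
        tmp[4  + i] = a3 + a2;
        tmp[12 + i] = a3 - a2;
      }
      for (i = 0; i < 4; ++i) {   /* horizontal pass with the +3 rounder */
        const int dc = tmp[0 + i * 4] + 3;
        const int a0 = dc             + tmp[3 + i * 4];
        const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4];
        const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4];
        const int a3 = dc             - tmp[3 + i * 4];
        out[ 0] = (a0 + a1) >> 3;
        out[16] = (a3 + a2) >> 3;
        out[32] = (a0 - a1) >> 3;
        out[48] = (a3 - a2) >> 3;
        out += 64;
      }
    }
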
diff --git a/src/dsp/dec_sse2.c b/src/dsp/dec_sse2.c
index 1cac1b84..472b68ec 100644
--- a/src/dsp/dec_sse2.c
+++ b/src/dsp/dec_sse2.c
@@ -12,15 +12,15 @@
#include "./dsp.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#include "../dec/vp8i.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -194,7 +194,7 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Add inverse transform to 'dst' and store.
{
- const __m128i zero = _mm_setzero_si128();
+ const __m128i zero = _mm_set1_epi16(0);
// Load the reference(s).
__m128i dst0, dst1, dst2, dst3;
if (do_two) {
@@ -278,14 +278,14 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) { \
const __m128i zero = _mm_setzero_si128(); \
- const __m128i t_1 = MM_ABS(p1, p0); \
- const __m128i t_2 = MM_ABS(q1, q0); \
+ const __m128i t1 = MM_ABS(p1, p0); \
+ const __m128i t2 = MM_ABS(q1, q0); \
\
const __m128i h = _mm_set1_epi8(hev_thresh); \
- const __m128i t_3 = _mm_subs_epu8(t_1, h); /* abs(p1 - p0) - hev_thresh */ \
- const __m128i t_4 = _mm_subs_epu8(t_2, h); /* abs(q1 - q0) - hev_thresh */ \
+ const __m128i t3 = _mm_subs_epu8(t1, h); /* abs(p1 - p0) - hev_thresh */ \
+ const __m128i t4 = _mm_subs_epu8(t2, h); /* abs(q1 - q0) - hev_thresh */ \
\
- not_hev = _mm_or_si128(t_3, t_4); \
+ not_hev = _mm_or_si128(t3, t4); \
not_hev = _mm_cmpeq_epi8(not_hev, zero); /* not_hev <= t1 && not_hev <= t2 */\
}
@@ -314,13 +314,13 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
-// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
+// q = q - a and p = p + a; where a = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) { \
const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7); \
const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7); \
- const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7); \
- pi = _mm_adds_epi8(pi, delta); \
- qi = _mm_subs_epi8(qi, delta); \
+ const __m128i a = _mm_packs_epi16(a_lo7, a_hi7); \
+ pi = _mm_adds_epi8(pi, a); \
+ qi = _mm_subs_epi8(qi, a); \
}
static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
@@ -876,15 +876,9 @@ static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}
-#endif // WEBP_USE_SSE2
-
-//------------------------------------------------------------------------------
-// Entry point
-
extern void VP8DspInitSSE2(void);
void VP8DspInitSSE2(void) {
-#if defined(WEBP_USE_SSE2)
VP8Transform = TransformSSE2;
VP8VFilter16 = VFilter16SSE2;
@@ -900,9 +894,10 @@ void VP8DspInitSSE2(void) {
VP8SimpleHFilter16 = SimpleHFilter16SSE2;
VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
-#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
+
+#endif // WEBP_USE_SSE2
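
The GET_NOTHEV macro touched above is easier to read in scalar form. Per pixel it computes, in effect (a sketch of the intent, not code from the file):

    #include <stdlib.h>   /* for abs() */

    /* Returns 1 when the edge is NOT a "high edge variance" one, i.e. both
     * neighbour deltas stay within hev_thresh; 0 otherwise. */
    static int NotHEV(int p1, int p0, int q0, int q1, int hev_thresh) {
      return abs(p1 - p0) <= hev_thresh && abs(q1 - q0) <= hev_thresh;
    }

The SSE2 version reaches the same answer branchlessly: the saturating _mm_subs_epu8() turns "delta <= thresh" into a zero byte, and the final _mm_cmpeq_epi8() against zero converts that into a 0xff/0x00 per-pixel mask.
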
diff --git a/src/dsp/dsp.h b/src/dsp/dsp.h
index 9c186e55..3aad3095 100644
--- a/src/dsp/dsp.h
+++ b/src/dsp/dsp.h
@@ -29,9 +29,9 @@ extern "C" {
#define WEBP_USE_SSE2
#endif
-#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
-#define WEBP_ANDROID_NEON // Android targets that might support NEON
-#endif
+//#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
+//#define WEBP_ANDROID_NEON // Android targets that might support NEON
+//#endif
#if defined(__ARM_NEON__) || defined(WEBP_ANDROID_NEON)
#define WEBP_USE_NEON
@@ -49,6 +49,8 @@ extern VP8CPUInfo VP8GetCPUInfo;
//------------------------------------------------------------------------------
// Encoding
+int VP8GetAlpha(const int histo[]);
+
// Transforms
// VP8Idct: Does one of two inverse transforms. If do_two is set, the transforms
// will be done for (ref, in, dst) and (ref + 4, in + 16, dst + 4).
@@ -83,11 +85,10 @@ typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16],
int n, const struct VP8Matrix* const mtx);
extern VP8QuantizeBlock VP8EncQuantizeBlock;
-// Collect histogram for susceptibility calculation and accumulate in histo[].
-struct VP8Histogram;
-typedef void (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block,
- struct VP8Histogram* const histo);
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+typedef int (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block);
extern const int VP8DspScan[16 + 4 + 4];
extern VP8CHisto VP8CollectHistogram;
@@ -103,7 +104,7 @@ extern VP8DecIdct2 VP8Transform;
extern VP8DecIdct VP8TransformUV;
extern VP8DecIdct VP8TransformDC;
extern VP8DecIdct VP8TransformDCUV;
-extern VP8WHT VP8TransformWHT;
+extern void (*VP8TransformWHT)(const int16_t* in, int16_t* out);
// *dst is the destination block, with stride BPS. Boundary samples are
// assumed accessible when needed.
@@ -158,9 +159,6 @@ extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
// Initializes SSE2 version of the fancy upsamplers.
void WebPInitUpsamplersSSE2(void);
-// NEON version
-void WebPInitUpsamplersNEON(void);
-
#endif // FANCY_UPSAMPLING
// Point-sampling methods.
@@ -202,7 +200,6 @@ extern void (*WebPApplyAlphaMultiply4444)(
void WebPInitPremultiply(void);
void WebPInitPremultiplySSE2(void); // should not be called directly.
-void WebPInitPremultiplyNEON(void);
//------------------------------------------------------------------------------
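
One detail worth noting in the dsp.h hunks: the VP8TransformWHT change merely trades a typedef'd pointer type for the spelled-out form; the two declare the same object. A small sketch:

    #include <stdint.h>

    typedef void (*VP8WHT)(const int16_t* in, int16_t* out);

    /* The two declarations below are equivalent -- and compatible, so a
     * translation unit may legally contain both. */
    extern VP8WHT VP8TransformWHT;
    extern void (*VP8TransformWHT)(const int16_t* in, int16_t* out);
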
diff --git a/src/dsp/enc.c b/src/dsp/enc.c
index ae2c830a..02234564 100644
--- a/src/dsp/enc.c
+++ b/src/dsp/enc.c
@@ -17,18 +17,31 @@
extern "C" {
#endif
-static WEBP_INLINE uint8_t clip_8b(int v) {
- return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
-}
-
-static WEBP_INLINE int clip_max(int v, int max) {
- return (v > max) ? max : v;
-}
-
//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
+static int ClipAlpha(int alpha) {
+ return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
+}
+
+int VP8GetAlpha(const int histo[MAX_COEFF_THRESH + 1]) {
+ int num = 0, den = 0, val = 0;
+ int k;
+ int alpha;
+ // note: changing this loop to avoid the numerous "k + 1" slows things down.
+ for (k = 0; k < MAX_COEFF_THRESH; ++k) {
+ if (histo[k + 1]) {
+ val += histo[k + 1];
+ num += val * (k + 1);
+ den += (k + 1) * (k + 1);
+ }
+ }
+ // we scale the value to a usable [0..255] range
+ alpha = den ? 10 * num / den - 5 : 0;
+ return ClipAlpha(alpha);
+}
+
const int VP8DspScan[16 + 4 + 4] = {
// Luma
0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
@@ -40,23 +53,27 @@ const int VP8DspScan[16 + 4 + 4] = {
8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V
};
-static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block,
- VP8Histogram* const histo) {
- int j;
+static int CollectHistogram(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block) {
+ int histo[MAX_COEFF_THRESH + 1] = { 0 };
+ int16_t out[16];
+ int j, k;
for (j = start_block; j < end_block; ++j) {
- int k;
- int16_t out[16];
-
VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
- // Convert coefficients to bin.
+ // Convert coefficients to bin (within out[]).
for (k = 0; k < 16; ++k) {
- const int v = abs(out[k]) >> 3; // TODO(skal): add rounding?
- const int clipped_value = clip_max(v, MAX_COEFF_THRESH);
- histo->distribution[clipped_value]++;
+ const int v = abs(out[k]) >> 2;
+ out[k] = (v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v;
+ }
+
+ // Use bin to update histogram.
+ for (k = 0; k < 16; ++k) {
+ histo[out[k]]++;
}
}
+
+ return VP8GetAlpha(histo);
}
//------------------------------------------------------------------------------
@@ -72,12 +89,15 @@ static void InitTables(void) {
if (!tables_ok) {
int i;
for (i = -255; i <= 255 + 255; ++i) {
- clip1[255 + i] = clip_8b(i);
+ clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i;
}
tables_ok = 1;
}
}
+static WEBP_INLINE uint8_t clip_8b(int v) {
+ return (!(v & ~0xff)) ? v : v < 0 ? 0 : 255;
+}
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -134,25 +154,25 @@ static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) {
int i;
int tmp[16];
for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {
- const int d0 = src[0] - ref[0]; // 9bit dynamic range ([-255,255])
+ const int d0 = src[0] - ref[0];
const int d1 = src[1] - ref[1];
const int d2 = src[2] - ref[2];
const int d3 = src[3] - ref[3];
- const int a0 = (d0 + d3); // 10b [-510,510]
- const int a1 = (d1 + d2);
- const int a2 = (d1 - d2);
- const int a3 = (d0 - d3);
- tmp[0 + i * 4] = (a0 + a1) << 3; // 14b [-8160,8160]
- tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9; // [-7536,7542]
- tmp[2 + i * 4] = (a0 - a1) << 3;
- tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9;
+ const int a0 = (d0 + d3) << 3;
+ const int a1 = (d1 + d2) << 3;
+ const int a2 = (d1 - d2) << 3;
+ const int a3 = (d0 - d3) << 3;
+ tmp[0 + i * 4] = (a0 + a1);
+ tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 14500) >> 12;
+ tmp[2 + i * 4] = (a0 - a1);
+ tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 7500) >> 12;
}
for (i = 0; i < 4; ++i) {
- const int a0 = (tmp[0 + i] + tmp[12 + i]); // 15b
+ const int a0 = (tmp[0 + i] + tmp[12 + i]);
const int a1 = (tmp[4 + i] + tmp[ 8 + i]);
const int a2 = (tmp[4 + i] - tmp[ 8 + i]);
const int a3 = (tmp[0 + i] - tmp[12 + i]);
- out[0 + i] = (a0 + a1 + 7) >> 4; // 12b
+ out[0 + i] = (a0 + a1 + 7) >> 4;
out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
out[8 + i] = (a0 - a1 + 7) >> 4;
out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);
@@ -569,30 +589,30 @@ static int TTransform(const uint8_t* in, const uint16_t* w) {
int i;
// horizontal pass
for (i = 0; i < 4; ++i, in += BPS) {
- const int a0 = in[0] + in[2];
- const int a1 = in[1] + in[3];
- const int a2 = in[1] - in[3];
- const int a3 = in[0] - in[2];
- tmp[0 + i * 4] = a0 + a1;
+ const int a0 = (in[0] + in[2]) << 2;
+ const int a1 = (in[1] + in[3]) << 2;
+ const int a2 = (in[1] - in[3]) << 2;
+ const int a3 = (in[0] - in[2]) << 2;
+ tmp[0 + i * 4] = a0 + a1 + (a0 != 0);
tmp[1 + i * 4] = a3 + a2;
tmp[2 + i * 4] = a3 - a2;
tmp[3 + i * 4] = a0 - a1;
}
// vertical pass
for (i = 0; i < 4; ++i, ++w) {
- const int a0 = tmp[0 + i] + tmp[8 + i];
- const int a1 = tmp[4 + i] + tmp[12+ i];
- const int a2 = tmp[4 + i] - tmp[12+ i];
- const int a3 = tmp[0 + i] - tmp[8 + i];
+ const int a0 = (tmp[0 + i] + tmp[8 + i]);
+ const int a1 = (tmp[4 + i] + tmp[12+ i]);
+ const int a2 = (tmp[4 + i] - tmp[12+ i]);
+ const int a3 = (tmp[0 + i] - tmp[8 + i]);
const int b0 = a0 + a1;
const int b1 = a3 + a2;
const int b2 = a3 - a2;
const int b3 = a0 - a1;
-
- sum += w[ 0] * abs(b0);
- sum += w[ 4] * abs(b1);
- sum += w[ 8] * abs(b2);
- sum += w[12] * abs(b3);
+ // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
+ sum += w[ 0] * ((abs(b0) + 3) >> 3);
+ sum += w[ 4] * ((abs(b1) + 3) >> 3);
+ sum += w[ 8] * ((abs(b2) + 3) >> 3);
+ sum += w[12] * ((abs(b3) + 3) >> 3);
}
return sum;
}
@@ -601,7 +621,7 @@ static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int sum1 = TTransform(a, w);
const int sum2 = TTransform(b, w);
- return abs(sum2 - sum1) >> 5;
+ return (abs(sum2 - sum1) + 8) >> 4;
}
static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
@@ -686,7 +706,6 @@ VP8QuantizeBlock VP8EncQuantizeBlock;
VP8BlockCopy VP8Copy4x4;
extern void VP8EncDspInitSSE2(void);
-extern void VP8EncDspInitNEON(void);
void VP8EncDspInit(void) {
InitTables();
@@ -715,10 +734,6 @@ void VP8EncDspInit(void) {
if (VP8GetCPUInfo(kSSE2)) {
VP8EncDspInitSSE2();
}
-#elif defined(WEBP_USE_NEON)
- if (VP8GetCPUInfo(kNEON)) {
- VP8EncDspInitNEON();
- }
#endif
}
}
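
To make the restored VP8GetAlpha concrete, here is a tiny worked example (a standalone sketch; MAX_COEFF_THRESH is given an illustrative value rather than the one from vp8enci.h):

    /* alpha_demo.c -- standalone check of the susceptibility formula */
    #include <stdio.h>

    #define MAX_COEFF_THRESH 64   /* assumption: illustrative bin count only */

    static int GetAlpha(const int histo[MAX_COEFF_THRESH + 1]) {
      int num = 0, den = 0, val = 0;
      int k, alpha;
      for (k = 0; k < MAX_COEFF_THRESH; ++k) {
        if (histo[k + 1]) {
          val += histo[k + 1];
          num += val * (k + 1);
          den += (k + 1) * (k + 1);
        }
      }
      alpha = den ? 10 * num / den - 5 : 0;
      return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
    }

    int main(void) {
      int histo[MAX_COEFF_THRESH + 1] = { 0 };
      histo[1] = 4;   /* four coefficients fell into bin 1 */
      histo[2] = 2;   /* two more into bin 2 */
      /* num = 4*1 + (4+2)*2 = 16, den = 1*1 + 2*2 = 5,
       * alpha = 10*16/5 - 5 = 27 */
      printf("alpha = %d\n", GetAlpha(histo));   /* prints: alpha = 27 */
      return 0;
    }
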
diff --git a/src/dsp/enc_neon.c b/src/dsp/enc_neon.c
deleted file mode 100644
index b5a1fbaf..00000000
--- a/src/dsp/enc_neon.c
+++ /dev/null
@@ -1,661 +0,0 @@
-// Copyright 2012 Google Inc. All Rights Reserved.
-//
-// This code is licensed under the same terms as WebM:
-// Software License Agreement: http://www.webmproject.org/license/software/
-// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
-// -----------------------------------------------------------------------------
-//
-// ARM NEON version of speed-critical encoding functions.
-//
-// adapted from libvpx (http://www.webmproject.org/code/)
-
-#include "./dsp.h"
-
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
-#if defined(WEBP_USE_NEON)
-
-#include "../enc/vp8enci.h"
-
-//------------------------------------------------------------------------------
-// Transforms (Paragraph 14.4)
-
-// Inverse transform.
-// This code is pretty much the same as TransformOneNEON in the decoder, except
-// for subtraction to *ref. See the comments there for algorithmic explanations.
-static void ITransformOne(const uint8_t* ref,
- const int16_t* in, uint8_t* dst) {
- const int kBPS = BPS;
- const int16_t kC1C2[] = { 20091, 17734, 0, 0 }; // kC1 / (kC2 >> 1) / 0 / 0
-
- __asm__ volatile (
- "vld1.16 {q1, q2}, [%[in]] \n"
- "vld1.16 {d0}, [%[kC1C2]] \n"
-
- // d2: in[0]
- // d3: in[8]
- // d4: in[4]
- // d5: in[12]
- "vswp d3, d4 \n"
-
- // q8 = {in[4], in[12]} * kC1 * 2 >> 16
- // q9 = {in[4], in[12]} * kC2 >> 16
- "vqdmulh.s16 q8, q2, d0[0] \n"
- "vqdmulh.s16 q9, q2, d0[1] \n"
-
- // d22 = a = in[0] + in[8]
- // d23 = b = in[0] - in[8]
- "vqadd.s16 d22, d2, d3 \n"
- "vqsub.s16 d23, d2, d3 \n"
-
- // q8 = in[4]/[12] * kC1 >> 16
- "vshr.s16 q8, q8, #1 \n"
-
- // Add {in[4], in[12]} back after the multiplication.
- "vqadd.s16 q8, q2, q8 \n"
-
- // d20 = c = in[4]*kC2 - in[12]*kC1
- // d21 = d = in[4]*kC1 + in[12]*kC2
- "vqsub.s16 d20, d18, d17 \n"
- "vqadd.s16 d21, d19, d16 \n"
-
- // d2 = tmp[0] = a + d
- // d3 = tmp[1] = b + c
- // d4 = tmp[2] = b - c
- // d5 = tmp[3] = a - d
- "vqadd.s16 d2, d22, d21 \n"
- "vqadd.s16 d3, d23, d20 \n"
- "vqsub.s16 d4, d23, d20 \n"
- "vqsub.s16 d5, d22, d21 \n"
-
- "vzip.16 q1, q2 \n"
- "vzip.16 q1, q2 \n"
-
- "vswp d3, d4 \n"
-
- // q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
- // q9 = {tmp[4], tmp[12]} * kC2 >> 16
- "vqdmulh.s16 q8, q2, d0[0] \n"
- "vqdmulh.s16 q9, q2, d0[1] \n"
-
- // d22 = a = tmp[0] + tmp[8]
- // d23 = b = tmp[0] - tmp[8]
- "vqadd.s16 d22, d2, d3 \n"
- "vqsub.s16 d23, d2, d3 \n"
-
- "vshr.s16 q8, q8, #1 \n"
- "vqadd.s16 q8, q2, q8 \n"
-
- // d20 = c = in[4]*kC2 - in[12]*kC1
- // d21 = d = in[4]*kC1 + in[12]*kC2
- "vqsub.s16 d20, d18, d17 \n"
- "vqadd.s16 d21, d19, d16 \n"
-
- // d2 = tmp[0] = a + d
- // d3 = tmp[1] = b + c
- // d4 = tmp[2] = b - c
- // d5 = tmp[3] = a - d
- "vqadd.s16 d2, d22, d21 \n"
- "vqadd.s16 d3, d23, d20 \n"
- "vqsub.s16 d4, d23, d20 \n"
- "vqsub.s16 d5, d22, d21 \n"
-
- "vld1.32 d6[0], [%[ref]], %[kBPS] \n"
- "vld1.32 d6[1], [%[ref]], %[kBPS] \n"
- "vld1.32 d7[0], [%[ref]], %[kBPS] \n"
- "vld1.32 d7[1], [%[ref]], %[kBPS] \n"
-
- "sub %[ref], %[ref], %[kBPS], lsl #2 \n"
-
- // (val) + 4 >> 3
- "vrshr.s16 d2, d2, #3 \n"
- "vrshr.s16 d3, d3, #3 \n"
- "vrshr.s16 d4, d4, #3 \n"
- "vrshr.s16 d5, d5, #3 \n"
-
- "vzip.16 q1, q2 \n"
- "vzip.16 q1, q2 \n"
-
- // Must accumulate before saturating
- "vmovl.u8 q8, d6 \n"
- "vmovl.u8 q9, d7 \n"
-
- "vqadd.s16 q1, q1, q8 \n"
- "vqadd.s16 q2, q2, q9 \n"
-
- "vqmovun.s16 d0, q1 \n"
- "vqmovun.s16 d1, q2 \n"
-
- "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
- "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
- "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
- "vst1.32 d1[1], [%[dst]] \n"
-
- : [in] "+r"(in), [dst] "+r"(dst) // modified registers
- : [kBPS] "r"(kBPS), [kC1C2] "r"(kC1C2), [ref] "r"(ref) // constants
- : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" // clobbered
- );
-}
-
-static void ITransform(const uint8_t* ref,
- const int16_t* in, uint8_t* dst, int do_two) {
- ITransformOne(ref, in, dst);
- if (do_two) {
- ITransformOne(ref + 4, in + 16, dst + 4);
- }
-}
-
-// Same code as dec_neon.c
-static void ITransformWHT(const int16_t* in, int16_t* out) {
- const int kStep = 32; // The store is only incrementing the pointer as if we
- // had stored a single byte.
- __asm__ volatile (
- // part 1
- // load data into q0, q1
- "vld1.16 {q0, q1}, [%[in]] \n"
-
- "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
- "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
- "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
- "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
-
- "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
- "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
- "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
- "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
-
- // Transpose
- // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
- // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
- "vswp d1, d4 \n" // vtrn.64 q0, q2
- "vswp d3, d6 \n" // vtrn.64 q1, q3
- "vtrn.32 q0, q1 \n"
- "vtrn.32 q2, q3 \n"
-
- "vmov.s32 q4, #3 \n" // dc = 3
- "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
- "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
- "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
- "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
- "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
-
- "vadd.s32 q0, q6, q7 \n"
- "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
- "vadd.s32 q1, q9, q8 \n"
- "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
- "vsub.s32 q2, q6, q7 \n"
- "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
- "vsub.s32 q3, q9, q8 \n"
- "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
-
- // set the results to output
- "vst1.16 d0[0], [%[out]], %[kStep] \n"
- "vst1.16 d1[0], [%[out]], %[kStep] \n"
- "vst1.16 d2[0], [%[out]], %[kStep] \n"
- "vst1.16 d3[0], [%[out]], %[kStep] \n"
- "vst1.16 d0[1], [%[out]], %[kStep] \n"
- "vst1.16 d1[1], [%[out]], %[kStep] \n"
- "vst1.16 d2[1], [%[out]], %[kStep] \n"
- "vst1.16 d3[1], [%[out]], %[kStep] \n"
- "vst1.16 d0[2], [%[out]], %[kStep] \n"
- "vst1.16 d1[2], [%[out]], %[kStep] \n"
- "vst1.16 d2[2], [%[out]], %[kStep] \n"
- "vst1.16 d3[2], [%[out]], %[kStep] \n"
- "vst1.16 d0[3], [%[out]], %[kStep] \n"
- "vst1.16 d1[3], [%[out]], %[kStep] \n"
- "vst1.16 d2[3], [%[out]], %[kStep] \n"
- "vst1.16 d3[3], [%[out]], %[kStep] \n"
-
- : [out] "+r"(out) // modified registers
- : [in] "r"(in), [kStep] "r"(kStep) // constants
- : "memory", "q0", "q1", "q2", "q3", "q4",
- "q5", "q6", "q7", "q8", "q9" // clobbered
- );
-}
-
-// Forward transform.
-
-// adapted from vp8/encoder/arm/neon/shortfdct_neon.asm
-static const int16_t kCoeff16[] = {
- 5352, 5352, 5352, 5352, 2217, 2217, 2217, 2217
-};
-static const int32_t kCoeff32[] = {
- 1812, 1812, 1812, 1812,
- 937, 937, 937, 937,
- 12000, 12000, 12000, 12000,
- 51000, 51000, 51000, 51000
-};
-
-static void FTransform(const uint8_t* src, const uint8_t* ref,
- int16_t* out) {
- const int kBPS = BPS;
- const uint8_t* src_ptr = src;
- const uint8_t* ref_ptr = ref;
- const int16_t* coeff16 = kCoeff16;
- const int32_t* coeff32 = kCoeff32;
-
- __asm__ volatile (
- // load src into q4, q5 in high half
- "vld1.8 {d8}, [%[src_ptr]], %[kBPS] \n"
- "vld1.8 {d10}, [%[src_ptr]], %[kBPS] \n"
- "vld1.8 {d9}, [%[src_ptr]], %[kBPS] \n"
- "vld1.8 {d11}, [%[src_ptr]] \n"
-
- // load ref into q6, q7 in high half
- "vld1.8 {d12}, [%[ref_ptr]], %[kBPS] \n"
- "vld1.8 {d14}, [%[ref_ptr]], %[kBPS] \n"
- "vld1.8 {d13}, [%[ref_ptr]], %[kBPS] \n"
- "vld1.8 {d15}, [%[ref_ptr]] \n"
-
- // Pack the high values in to q4 and q6
- "vtrn.32 q4, q5 \n"
- "vtrn.32 q6, q7 \n"
-
- // d[0-3] = src - ref
- "vsubl.u8 q0, d8, d12 \n"
- "vsubl.u8 q1, d9, d13 \n"
-
- // load coeff16 into q8(d16=5352, d17=2217)
- "vld1.16 {q8}, [%[coeff16]] \n"
-
- // load coeff32 high half into q9 = 1812, q10 = 937
- "vld1.32 {q9, q10}, [%[coeff32]]! \n"
-
- // load coeff32 low half into q11=12000, q12=51000
- "vld1.32 {q11,q12}, [%[coeff32]] \n"
-
- // part 1
- // Transpose. Register dN is the same as dN in C
- "vtrn.32 d0, d2 \n"
- "vtrn.32 d1, d3 \n"
- "vtrn.16 d0, d1 \n"
- "vtrn.16 d2, d3 \n"
-
- "vadd.s16 d4, d0, d3 \n" // a0 = d0 + d3
- "vadd.s16 d5, d1, d2 \n" // a1 = d1 + d2
- "vsub.s16 d6, d1, d2 \n" // a2 = d1 - d2
- "vsub.s16 d7, d0, d3 \n" // a3 = d0 - d3
-
- "vadd.s16 d0, d4, d5 \n" // a0 + a1
- "vshl.s16 d0, d0, #3 \n" // temp[0+i*4] = (a0+a1) << 3
- "vsub.s16 d2, d4, d5 \n" // a0 - a1
- "vshl.s16 d2, d2, #3 \n" // (temp[2+i*4] = (a0-a1) << 3
-
- "vmlal.s16 q9, d7, d16 \n" // a3*5352 + 1812
- "vmlal.s16 q10, d7, d17 \n" // a3*2217 + 937
- "vmlal.s16 q9, d6, d17 \n" // a2*2217 + a3*5352 + 1812
- "vmlsl.s16 q10, d6, d16 \n" // a3*2217 + 937 - a2*5352
-
- // temp[1+i*4] = (d2*2217 + d3*5352 + 1812) >> 9
- // temp[3+i*4] = (d3*2217 + 937 - d2*5352) >> 9
- "vshrn.s32 d1, q9, #9 \n"
- "vshrn.s32 d3, q10, #9 \n"
-
- // part 2
- // transpose d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
- "vtrn.32 d0, d2 \n"
- "vtrn.32 d1, d3 \n"
- "vtrn.16 d0, d1 \n"
- "vtrn.16 d2, d3 \n"
-
- "vmov.s16 d26, #7 \n"
-
- "vadd.s16 d4, d0, d3 \n" // a1 = ip[0] + ip[12]
- "vadd.s16 d5, d1, d2 \n" // b1 = ip[4] + ip[8]
- "vsub.s16 d6, d1, d2 \n" // c1 = ip[4] - ip[8]
- "vadd.s16 d4, d4, d26 \n" // a1 + 7
- "vsub.s16 d7, d0, d3 \n" // d1 = ip[0] - ip[12]
-
- "vadd.s16 d0, d4, d5 \n" // op[0] = a1 + b1 + 7
- "vsub.s16 d2, d4, d5 \n" // op[8] = a1 - b1 + 7
-
- "vmlal.s16 q11, d7, d16 \n" // d1*5352 + 12000
- "vmlal.s16 q12, d7, d17 \n" // d1*2217 + 51000
-
- "vceq.s16 d4, d7, #0 \n"
-
- "vshr.s16 d0, d0, #4 \n"
- "vshr.s16 d2, d2, #4 \n"
-
- "vmlal.s16 q11, d6, d17 \n" // c1*2217 + d1*5352 + 12000
- "vmlsl.s16 q12, d6, d16 \n" // d1*2217 - c1*5352 + 51000
-
- "vmvn.s16 d4, d4 \n"
- // op[4] = (c1*2217 + d1*5352 + 12000)>>16
- "vshrn.s32 d1, q11, #16 \n"
- // op[4] += (d1!=0)
- "vsub.s16 d1, d1, d4 \n"
- // op[12]= (d1*2217 - c1*5352 + 51000)>>16
- "vshrn.s32 d3, q12, #16 \n"
-
- // set result to out array
- "vst1.16 {q0, q1}, [%[out]] \n"
- : [src_ptr] "+r"(src_ptr), [ref_ptr] "+r"(ref_ptr),
- [coeff32] "+r"(coeff32) // modified registers
- : [kBPS] "r"(kBPS), [coeff16] "r"(coeff16),
- [out] "r"(out) // constants
- : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
- "q10", "q11", "q12", "q13" // clobbered
- );
-}
-
-static void FTransformWHT(const int16_t* in, int16_t* out) {
- const int kStep = 32;
- __asm__ volatile (
- // d0 = in[0 * 16] , d1 = in[1 * 16]
- // d2 = in[2 * 16] , d3 = in[3 * 16]
- "vld1.16 d0[0], [%[in]], %[kStep] \n"
- "vld1.16 d1[0], [%[in]], %[kStep] \n"
- "vld1.16 d2[0], [%[in]], %[kStep] \n"
- "vld1.16 d3[0], [%[in]], %[kStep] \n"
- "vld1.16 d0[1], [%[in]], %[kStep] \n"
- "vld1.16 d1[1], [%[in]], %[kStep] \n"
- "vld1.16 d2[1], [%[in]], %[kStep] \n"
- "vld1.16 d3[1], [%[in]], %[kStep] \n"
- "vld1.16 d0[2], [%[in]], %[kStep] \n"
- "vld1.16 d1[2], [%[in]], %[kStep] \n"
- "vld1.16 d2[2], [%[in]], %[kStep] \n"
- "vld1.16 d3[2], [%[in]], %[kStep] \n"
- "vld1.16 d0[3], [%[in]], %[kStep] \n"
- "vld1.16 d1[3], [%[in]], %[kStep] \n"
- "vld1.16 d2[3], [%[in]], %[kStep] \n"
- "vld1.16 d3[3], [%[in]], %[kStep] \n"
-
- "vaddl.s16 q2, d0, d2 \n"
- "vshl.s32 q2, q2, #2 \n" // a0=(in[0*16]+in[2*16])<<2
- "vaddl.s16 q3, d1, d3 \n"
- "vshl.s32 q3, q3, #2 \n" // a1=(in[1*16]+in[3*16])<<2
- "vsubl.s16 q4, d1, d3 \n"
- "vshl.s32 q4, q4, #2 \n" // a2=(in[1*16]-in[3*16])<<2
- "vsubl.s16 q5, d0, d2 \n"
- "vshl.s32 q5, q5, #2 \n" // a3=(in[0*16]-in[2*16])<<2
-
- "vceq.s32 q10, q2, #0 \n"
- "vmvn.s32 q10, q10 \n" // (a0 != 0)
- "vqadd.s32 q6, q2, q3 \n" // (a0 + a1)
- "vqsub.s32 q6, q6, q10 \n" // (a0 + a1) + (a0 != 0)
- "vqadd.s32 q7, q5, q4 \n" // a3 + a2
- "vqsub.s32 q8, q5, q4 \n" // a3 - a2
- "vqsub.s32 q9, q2, q3 \n" // a0 - a1
-
- // Transpose
- // q6 = tmp[0, 1, 2, 3] ; q7 = tmp[ 4, 5, 6, 7]
- // q8 = tmp[8, 9, 10, 11] ; q9 = tmp[12, 13, 14, 15]
- "vswp d13, d16 \n" // vtrn.64 q0, q2
- "vswp d15, d18 \n" // vtrn.64 q1, q3
- "vtrn.32 q6, q7 \n"
- "vtrn.32 q8, q9 \n"
-
- "vqadd.s32 q0, q6, q8 \n" // a0 = tmp[0] + tmp[8]
- "vqadd.s32 q1, q7, q9 \n" // a1 = tmp[4] + tmp[12]
- "vqsub.s32 q2, q7, q9 \n" // a2 = tmp[4] - tmp[12]
- "vqsub.s32 q3, q6, q8 \n" // a3 = tmp[0] - tmp[8]
-
- "vqadd.s32 q4, q0, q1 \n" // b0 = a0 + a1
- "vqadd.s32 q5, q3, q2 \n" // b1 = a3 + a2
- "vqsub.s32 q6, q3, q2 \n" // b2 = a3 - a2
- "vqsub.s32 q7, q0, q1 \n" // b3 = a0 - a1
-
- "vmov.s32 q0, #3 \n" // q0 = 3
-
- "vcgt.s32 q1, q4, #0 \n" // (b0>0)
- "vqsub.s32 q2, q4, q1 \n" // (b0+(b0>0))
- "vqadd.s32 q3, q2, q0 \n" // (b0+(b0>0)+3)
- "vshrn.s32 d18, q3, #3 \n" // (b0+(b0>0)+3) >> 3
-
- "vcgt.s32 q1, q5, #0 \n" // (b1>0)
- "vqsub.s32 q2, q5, q1 \n" // (b1+(b1>0))
- "vqadd.s32 q3, q2, q0 \n" // (b1+(b1>0)+3)
- "vshrn.s32 d19, q3, #3 \n" // (b1+(b1>0)+3) >> 3
-
- "vcgt.s32 q1, q6, #0 \n" // (b2>0)
- "vqsub.s32 q2, q6, q1 \n" // (b2+(b2>0))
- "vqadd.s32 q3, q2, q0 \n" // (b2+(b2>0)+3)
- "vshrn.s32 d20, q3, #3 \n" // (b2+(b2>0)+3) >> 3
-
- "vcgt.s32 q1, q7, #0 \n" // (b3>0)
- "vqsub.s32 q2, q7, q1 \n" // (b3+(b3>0))
- "vqadd.s32 q3, q2, q0 \n" // (b3+(b3>0)+3)
- "vshrn.s32 d21, q3, #3 \n" // (b3+(b3>0)+3) >> 3
-
- "vst1.16 {q9, q10}, [%[out]] \n"
-
- : [in] "+r"(in)
- : [kStep] "r"(kStep), [out] "r"(out)
- : "memory", "q0", "q1", "q2", "q3", "q4", "q5",
- "q6", "q7", "q8", "q9", "q10" // clobbered
- ) ;
-}
-
-//------------------------------------------------------------------------------
-// Texture distortion
-//
-// We try to match the spectral content (weighted) between source and
-// reconstructed samples.
-
-// Hadamard transform
-// Returns the weighted sum of the absolute value of transformed coefficients.
-// This uses a TTransform helper function in C
-static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
- const uint16_t* const w) {
- const int kBPS = BPS;
- const uint8_t* A = a;
- const uint8_t* B = b;
- const uint16_t* W = w;
- int sum;
- __asm__ volatile (
- "vld1.32 d0[0], [%[a]], %[kBPS] \n"
- "vld1.32 d0[1], [%[a]], %[kBPS] \n"
- "vld1.32 d2[0], [%[a]], %[kBPS] \n"
- "vld1.32 d2[1], [%[a]] \n"
-
- "vld1.32 d1[0], [%[b]], %[kBPS] \n"
- "vld1.32 d1[1], [%[b]], %[kBPS] \n"
- "vld1.32 d3[0], [%[b]], %[kBPS] \n"
- "vld1.32 d3[1], [%[b]] \n"
-
- // a d0/d2, b d1/d3
- // d0/d1: 01 01 01 01
- // d2/d3: 23 23 23 23
- // But: it goes 01 45 23 67
- // Notice the middle values are transposed
- "vtrn.16 q0, q1 \n"
-
- // {a0, a1} = {in[0] + in[2], in[1] + in[3]}
- "vaddl.u8 q2, d0, d2 \n"
- "vaddl.u8 q10, d1, d3 \n"
- // {a3, a2} = {in[0] - in[2], in[1] - in[3]}
- "vsubl.u8 q3, d0, d2 \n"
- "vsubl.u8 q11, d1, d3 \n"
-
- // tmp[0] = a0 + a1
- "vpaddl.s16 q0, q2 \n"
- "vpaddl.s16 q8, q10 \n"
-
- // tmp[1] = a3 + a2
- "vpaddl.s16 q1, q3 \n"
- "vpaddl.s16 q9, q11 \n"
-
- // No pair subtract
- // q2 = {a0, a3}
- // q3 = {a1, a2}
- "vtrn.16 q2, q3 \n"
- "vtrn.16 q10, q11 \n"
-
- // {tmp[3], tmp[2]} = {a0 - a1, a3 - a2}
- "vsubl.s16 q12, d4, d6 \n"
- "vsubl.s16 q13, d5, d7 \n"
- "vsubl.s16 q14, d20, d22 \n"
- "vsubl.s16 q15, d21, d23 \n"
-
- // separate tmp[3] and tmp[2]
- // q12 = tmp[3]
- // q13 = tmp[2]
- "vtrn.32 q12, q13 \n"
- "vtrn.32 q14, q15 \n"
-
- // Transpose tmp for a
- "vswp d1, d26 \n" // vtrn.64
- "vswp d3, d24 \n" // vtrn.64
- "vtrn.32 q0, q1 \n"
- "vtrn.32 q13, q12 \n"
-
- // Transpose tmp for b
- "vswp d17, d30 \n" // vtrn.64
- "vswp d19, d28 \n" // vtrn.64
- "vtrn.32 q8, q9 \n"
- "vtrn.32 q15, q14 \n"
-
- // The first Q register is a, the second b.
- // q0/8 tmp[0-3]
- // q13/15 tmp[4-7]
- // q1/9 tmp[8-11]
- // q12/14 tmp[12-15]
-
- // These are still in 01 45 23 67 order. We fix it easily in the addition
- // case but the subtraction propagates them.
- "vswp d3, d27 \n"
- "vswp d19, d31 \n"
-
- // a0 = tmp[0] + tmp[8]
- "vadd.s32 q2, q0, q1 \n"
- "vadd.s32 q3, q8, q9 \n"
-
- // a1 = tmp[4] + tmp[12]
- "vadd.s32 q10, q13, q12 \n"
- "vadd.s32 q11, q15, q14 \n"
-
- // a2 = tmp[4] - tmp[12]
- "vsub.s32 q13, q13, q12 \n"
- "vsub.s32 q15, q15, q14 \n"
-
- // a3 = tmp[0] - tmp[8]
- "vsub.s32 q0, q0, q1 \n"
- "vsub.s32 q8, q8, q9 \n"
-
- // b0 = a0 + a1
- "vadd.s32 q1, q2, q10 \n"
- "vadd.s32 q9, q3, q11 \n"
-
- // b1 = a3 + a2
- "vadd.s32 q12, q0, q13 \n"
- "vadd.s32 q14, q8, q15 \n"
-
- // b2 = a3 - a2
- "vsub.s32 q0, q0, q13 \n"
- "vsub.s32 q8, q8, q15 \n"
-
- // b3 = a0 - a1
- "vsub.s32 q2, q2, q10 \n"
- "vsub.s32 q3, q3, q11 \n"
-
- "vld1.64 {q10, q11}, [%[w]] \n"
-
- // abs(b0)
- "vabs.s32 q1, q1 \n"
- "vabs.s32 q9, q9 \n"
- // abs(b1)
- "vabs.s32 q12, q12 \n"
- "vabs.s32 q14, q14 \n"
- // abs(b2)
- "vabs.s32 q0, q0 \n"
- "vabs.s32 q8, q8 \n"
- // abs(b3)
- "vabs.s32 q2, q2 \n"
- "vabs.s32 q3, q3 \n"
-
- // expand w before using.
- "vmovl.u16 q13, d20 \n"
- "vmovl.u16 q15, d21 \n"
-
- // w[0] * abs(b0)
- "vmul.u32 q1, q1, q13 \n"
- "vmul.u32 q9, q9, q13 \n"
-
- // w[4] * abs(b1)
- "vmla.u32 q1, q12, q15 \n"
- "vmla.u32 q9, q14, q15 \n"
-
- // expand w before using.
- "vmovl.u16 q13, d22 \n"
- "vmovl.u16 q15, d23 \n"
-
- // w[8] * abs(b2)
- "vmla.u32 q1, q0, q13 \n"
- "vmla.u32 q9, q8, q13 \n"
-
- // w[12] * abs(b3)
- "vmla.u32 q1, q2, q15 \n"
- "vmla.u32 q9, q3, q15 \n"
-
- // Sum the arrays
- "vpaddl.u32 q1, q1 \n"
- "vpaddl.u32 q9, q9 \n"
- "vadd.u64 d2, d3 \n"
- "vadd.u64 d18, d19 \n"
-
- // Hadamard transform needs 4 bits of extra precision (2 bits in each
- // direction) for dynamic range. Weights w[] are 16 bits at max, so the
- // maximum precision for coeff is 8 bits of input + 4 bits of Hadamard
- // transform + 16 bits for w[] + 2 bits of abs() summation.
- //
- // This uses a maximum of 31 bits (signed). Discarding the top 32 bits is
- // A-OK.
-
- // sum2 - sum1
- "vsub.u32 d0, d2, d18 \n"
- // abs(sum2 - sum1)
- "vabs.s32 d0, d0 \n"
- // abs(sum2 - sum1) >> 5
- "vshr.u32 d0, #5 \n"
-
- // It would be better to move the value straight into r0 but I'm not
- // entirely sure how this works with inline assembly.
- "vmov.32 %[sum], d0[0] \n"
-
- : [sum] "=r"(sum), [a] "+r"(A), [b] "+r"(B), [w] "+r"(W)
- : [kBPS] "r"(kBPS)
- : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
- "q10", "q11", "q12", "q13", "q14", "q15" // clobbered
- ) ;
-
- return sum;
-}
-
-static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
- const uint16_t* const w) {
- int D = 0;
- int x, y;
- for (y = 0; y < 16 * BPS; y += 4 * BPS) {
- for (x = 0; x < 16; x += 4) {
- D += Disto4x4(a + x + y, b + x + y, w);
- }
- }
- return D;
-}
-
-#endif // WEBP_USE_NEON
-
-//------------------------------------------------------------------------------
-// Entry point
-
-extern void VP8EncDspInitNEON(void);
-
-void VP8EncDspInitNEON(void) {
-#if defined(WEBP_USE_NEON)
- VP8ITransform = ITransform;
- VP8FTransform = FTransform;
-
- VP8ITransformWHT = ITransformWHT;
- VP8FTransformWHT = FTransformWHT;
-
- VP8TDisto4x4 = Disto4x4;
- VP8TDisto16x16 = Disto16x16;
-#endif // WEBP_USE_NEON
-}
-
-#if defined(__cplusplus) || defined(c_plusplus)
-} // extern "C"
-#endif
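
The deleted NEON Disto4x4 above mirrors the scalar ">> 5" variant that the enc.c hunk earlier in this diff also removes. Pasting those removed pieces back together gives the reference the NEON code was written against (a sketch assembled from the removed lines; the BPS value is an assumption here, the real one lives in vp8enci.h):

    #include <stdint.h>
    #include <stdlib.h>   /* for abs() */

    #define BPS 16   /* assumption: the encoder's buffer stride */

    static int TTransform(const uint8_t* in, const uint16_t* w) {
      int sum = 0;
      int tmp[16];
      int i;
      for (i = 0; i < 4; ++i, in += BPS) {   /* horizontal pass */
        const int a0 = in[0] + in[2];
        const int a1 = in[1] + in[3];
        const int a2 = in[1] - in[3];
        const int a3 = in[0] - in[2];
        tmp[0 + i * 4] = a0 + a1;
        tmp[1 + i * 4] = a3 + a2;
        tmp[2 + i * 4] = a3 - a2;
        tmp[3 + i * 4] = a0 - a1;
      }
      for (i = 0; i < 4; ++i, ++w) {         /* vertical pass, weighted sum */
        const int a0 = tmp[0 + i] + tmp[8 + i];
        const int a1 = tmp[4 + i] + tmp[12 + i];
        const int a2 = tmp[4 + i] - tmp[12 + i];
        const int a3 = tmp[0 + i] - tmp[8 + i];
        sum += w[ 0] * abs(a0 + a1);
        sum += w[ 4] * abs(a3 + a2);
        sum += w[ 8] * abs(a3 - a2);
        sum += w[12] * abs(a0 - a1);
      }
      return sum;
    }

    static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
                        const uint16_t* const w) {
      return abs(TTransform(b, w) - TTransform(a, w)) >> 5;
    }
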
diff --git a/src/dsp/enc_sse2.c b/src/dsp/enc_sse2.c
index c4148b56..b046761d 100644
--- a/src/dsp/enc_sse2.c
+++ b/src/dsp/enc_sse2.c
@@ -11,58 +11,27 @@
#include "./dsp.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
#if defined(WEBP_USE_SSE2)
#include <stdlib.h> // for abs()
#include <emmintrin.h>
#include "../enc/vp8enci.h"
-//------------------------------------------------------------------------------
-// Quite useful macro for debugging. Left here for convenience.
-
-#if 0
-#include <stdio.h>
-static void PrintReg(const __m128i r, const char* const name, int size) {
- int n;
- union {
- __m128i r;
- uint8_t i8[16];
- uint16_t i16[8];
- uint32_t i32[4];
- uint64_t i64[2];
- } tmp;
- tmp.r = r;
- printf("%s\t: ", name);
- if (size == 8) {
- for (n = 0; n < 16; ++n) printf("%.2x ", tmp.i8[n]);
- } else if (size == 16) {
- for (n = 0; n < 8; ++n) printf("%.4x ", tmp.i16[n]);
- } else if (size == 32) {
- for (n = 0; n < 4; ++n) printf("%.8x ", tmp.i32[n]);
- } else {
- for (n = 0; n < 2; ++n) printf("%.16lx ", tmp.i64[n]);
- }
- printf("\n");
-}
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
#endif
//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
-static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block,
- VP8Histogram* const histo) {
+static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block) {
+ int histo[MAX_COEFF_THRESH + 1] = { 0 };
+ int16_t out[16];
+ int j, k;
const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
- int j;
for (j = start_block; j < end_block; ++j) {
- int16_t out[16];
- int k;
-
VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
// Convert coefficients to bin (within out[]).
@@ -78,9 +47,9 @@ static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
const __m128i xor1 = _mm_xor_si128(out1, sign1);
const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
- // v = abs(out) >> 3
- const __m128i v0 = _mm_srai_epi16(abs0, 3);
- const __m128i v1 = _mm_srai_epi16(abs1, 3);
+ // v = abs(out) >> 2
+ const __m128i v0 = _mm_srai_epi16(abs0, 2);
+ const __m128i v1 = _mm_srai_epi16(abs1, 2);
// bin = min(v, MAX_COEFF_THRESH)
const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
@@ -89,11 +58,13 @@ static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
_mm_storeu_si128((__m128i*)&out[8], bin1);
}
- // Convert coefficients to bin.
+ // Use bin to update histogram.
for (k = 0; k < 16; ++k) {
- histo->distribution[out[k]]++;
+ histo[out[k]]++;
}
}
+
+ return VP8GetAlpha(histo);
}
//------------------------------------------------------------------------------
@@ -272,7 +243,7 @@ static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
// Add inverse transform to 'ref' and store.
{
- const __m128i zero = _mm_setzero_si128();
+ const __m128i zero = _mm_set1_epi16(0);
// Load the reference(s).
__m128i ref0, ref1, ref2, ref3;
if (do_two) {
@@ -324,22 +295,16 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
int16_t* out) {
const __m128i zero = _mm_setzero_si128();
const __m128i seven = _mm_set1_epi16(7);
- const __m128i k937 = _mm_set1_epi32(937);
- const __m128i k1812 = _mm_set1_epi32(1812);
+ const __m128i k7500 = _mm_set1_epi32(7500);
+ const __m128i k14500 = _mm_set1_epi32(14500);
const __m128i k51000 = _mm_set1_epi32(51000);
const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217,
5352, 2217, 5352, 2217);
const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
2217, -5352, 2217, -5352);
- const __m128i k88p = _mm_set_epi16(8, 8, 8, 8, 8, 8, 8, 8);
- const __m128i k88m = _mm_set_epi16(-8, 8, -8, 8, -8, 8, -8, 8);
- const __m128i k5352_2217p = _mm_set_epi16(2217, 5352, 2217, 5352,
- 2217, 5352, 2217, 5352);
- const __m128i k5352_2217m = _mm_set_epi16(-5352, 2217, -5352, 2217,
- -5352, 2217, -5352, 2217);
- __m128i v01, v32;
+ __m128i v01, v32;
// Difference between src and ref and initial transpose.
{
@@ -361,52 +326,73 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
- // Compute difference. -> 00 01 02 03 00 00 00 00
+ // Compute difference.
const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);
-
- // Unpack and shuffle
+ // Transpose.
// 00 01 02 03 0 0 0 0
// 10 11 12 13 0 0 0 0
// 20 21 22 23 0 0 0 0
// 30 31 32 33 0 0 0 0
- const __m128i shuf01 = _mm_unpacklo_epi32(diff0, diff1);
- const __m128i shuf23 = _mm_unpacklo_epi32(diff2, diff3);
- // 00 01 10 11 02 03 12 13
- // 20 21 30 31 22 23 32 33
- const __m128i shuf01_p =
- _mm_shufflehi_epi16(shuf01, _MM_SHUFFLE(2, 3, 0, 1));
- const __m128i shuf23_p =
- _mm_shufflehi_epi16(shuf23, _MM_SHUFFLE(2, 3, 0, 1));
- // 00 01 10 11 03 02 13 12
- // 20 21 30 31 23 22 33 32
- const __m128i s01 = _mm_unpacklo_epi64(shuf01_p, shuf23_p);
- const __m128i s32 = _mm_unpackhi_epi64(shuf01_p, shuf23_p);
- // 00 01 10 11 20 21 30 31
- // 03 02 13 12 23 22 33 32
- const __m128i a01 = _mm_add_epi16(s01, s32);
- const __m128i a32 = _mm_sub_epi16(s01, s32);
- // [d0 + d3 | d1 + d2 | ...] = [a0 a1 | a0' a1' | ... ]
- // [d0 - d3 | d1 - d2 | ...] = [a3 a2 | a3' a2' | ... ]
-
- const __m128i tmp0 = _mm_madd_epi16(a01, k88p); // [ (a0 + a1) << 3, ... ]
- const __m128i tmp2 = _mm_madd_epi16(a01, k88m); // [ (a0 - a1) << 3, ... ]
- const __m128i tmp1_1 = _mm_madd_epi16(a32, k5352_2217p);
- const __m128i tmp3_1 = _mm_madd_epi16(a32, k5352_2217m);
- const __m128i tmp1_2 = _mm_add_epi32(tmp1_1, k1812);
- const __m128i tmp3_2 = _mm_add_epi32(tmp3_1, k937);
- const __m128i tmp1 = _mm_srai_epi32(tmp1_2, 9);
- const __m128i tmp3 = _mm_srai_epi32(tmp3_2, 9);
- const __m128i s03 = _mm_packs_epi32(tmp0, tmp2);
- const __m128i s12 = _mm_packs_epi32(tmp1, tmp3);
- const __m128i s_lo = _mm_unpacklo_epi16(s03, s12); // 0 1 0 1 0 1...
- const __m128i s_hi = _mm_unpackhi_epi16(s03, s12); // 2 3 2 3 2 3
- const __m128i v23 = _mm_unpackhi_epi32(s_lo, s_hi);
- v01 = _mm_unpacklo_epi32(s_lo, s_hi);
- v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); // 3 2 3 2 3 2..
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(diff0, diff1);
+ const __m128i transpose0_1 = _mm_unpacklo_epi16(diff2, diff3);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
+ // a02 a12 a22 a32 a03 a13 a23 a33
+ // a00 a10 a20 a30 a01 a11 a21 a31
+ // a03 a13 a23 a33 a02 a12 a22 a32
+ }
+
+ // First pass and subsequent transpose.
+ {
+ // Same operations are done on the (0,3) and (1,2) pairs.
+ // b0 = (a0 + a3) << 3
+ // b1 = (a1 + a2) << 3
+ // b3 = (a0 - a3) << 3
+ // b2 = (a1 - a2) << 3
+ const __m128i a01 = _mm_add_epi16(v01, v32);
+ const __m128i a32 = _mm_sub_epi16(v01, v32);
+ const __m128i b01 = _mm_slli_epi16(a01, 3);
+ const __m128i b32 = _mm_slli_epi16(a32, 3);
+ const __m128i b11 = _mm_unpackhi_epi64(b01, b01);
+ const __m128i b22 = _mm_unpackhi_epi64(b32, b32);
+
+ // e0 = b0 + b1
+ // e2 = b0 - b1
+ const __m128i e0 = _mm_add_epi16(b01, b11);
+ const __m128i e2 = _mm_sub_epi16(b01, b11);
+ const __m128i e02 = _mm_unpacklo_epi64(e0, e2);
+
+ // e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12
+ // e3 = (b3 * 2217 - b2 * 5352 + 7500) >> 12
+ const __m128i b23 = _mm_unpacklo_epi16(b22, b32);
+ const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
+ const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
+ const __m128i d1 = _mm_add_epi32(c1, k14500);
+ const __m128i d3 = _mm_add_epi32(c3, k7500);
+ const __m128i e1 = _mm_srai_epi32(d1, 12);
+ const __m128i e3 = _mm_srai_epi32(d3, 12);
+ const __m128i e13 = _mm_packs_epi32(e1, e3);
+
+ // Transpose.
+ // 00 01 02 03 20 21 22 23
+ // 10 11 12 13 30 31 32 33
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(e02, e13);
+ const __m128i transpose0_1 = _mm_unpackhi_epi16(e02, e13);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
+ // 02 12 22 32 03 13 23 33
+ // 00 10 20 30 01 11 21 31
+ // 03 13 23 33 02 12 22 32
}
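For reference, a minimal scalar sketch of this first pass, using the b/e naming from the comments above (illustrative only, not the tree's C fallback):

    #include <stdint.h>
    static void FTransformPass1Row(const int16_t in[4], int16_t out[4]) {
      const int b0 = (in[0] + in[3]) << 3;
      const int b1 = (in[1] + in[2]) << 3;
      const int b3 = (in[0] - in[3]) << 3;
      const int b2 = (in[1] - in[2]) << 3;
      out[0] = (int16_t)(b0 + b1);                                /* e0 */
      out[1] = (int16_t)((b3 * 5352 + b2 * 2217 + 14500) >> 12);  /* e1 */
      out[2] = (int16_t)(b0 - b1);                                /* e2 */
      out[3] = (int16_t)((b3 * 2217 - b2 * 5352 +  7500) >> 12);  /* e3 */
    }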
// Second pass
@@ -420,12 +406,13 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i a32 = _mm_sub_epi16(v01, v32);
const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
const __m128i a22 = _mm_unpackhi_epi64(a32, a32);
- const __m128i a01_plus_7 = _mm_add_epi16(a01, seven);
// d0 = (a0 + a1 + 7) >> 4;
// d2 = (a0 - a1 + 7) >> 4;
- const __m128i c0 = _mm_add_epi16(a01_plus_7, a11);
- const __m128i c2 = _mm_sub_epi16(a01_plus_7, a11);
+ const __m128i b0 = _mm_add_epi16(a01, a11);
+ const __m128i b2 = _mm_sub_epi16(a01, a11);
+ const __m128i c0 = _mm_add_epi16(b0, seven);
+ const __m128i c2 = _mm_add_epi16(b2, seven);
const __m128i d0 = _mm_srai_epi16(c0, 4);
const __m128i d2 = _mm_srai_epi16(c2, 4);
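The reordering above only re-associates the +7 rounding bias; per 16-bit lane the two groupings are identical (scalar stand-ins, hypothetical names):

    int d0_old = ((a0 + 7) + a1) >> 4;  /* bias folded into a01 first */
    int d0_new = ((a0 + a1) + 7) >> 4;  /* bias added after the sum */
    /* d0_old == d0_new for every input; only the instruction grouping
       changes. */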
@@ -443,7 +430,6 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
// f1 = f1 + (a3 != 0);
// The compare will return (0xffff, 0) for (==0, !=0). To turn that into the
// desired (0, 1), we add one earlier through k12000_plus_one.
- // -> f1 = f1 + 1 - (a3 == 0)
const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));
_mm_storel_epi64((__m128i*)&out[ 0], d0);
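A scalar sketch of the second-pass odd rows, including the (a3 != 0) correction that k12000_plus_one folds into the rounding bias (assuming the usual 12000/51000 constants of the C fallback; names hypothetical):

    #include <stdint.h>
    static void FTransformPass2Odd(int a2, int a3,
                                   int16_t* const f1, int16_t* const f3) {
      /* +1 whenever a3 != 0, matching the cmpeq-based fixup above. */
      *f1 = (int16_t)(((a3 * 5352 + a2 * 2217 + 12000) >> 16) + (a3 != 0));
      *f3 = (int16_t)((a3 * 2217 - a2 * 5352 + 51000) >> 16);
    }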
@@ -456,101 +442,10 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
//------------------------------------------------------------------------------
// Metric
-static int SSE_Nx4SSE2(const uint8_t* a, const uint8_t* b,
- int num_quads, int do_16) {
- const __m128i zero = _mm_setzero_si128();
- __m128i sum1 = zero;
- __m128i sum2 = zero;
-
- while (num_quads-- > 0) {
- // Note: for the !do_16 case, we read 16 pixels instead of 8 but that's ok,
- // thanks to buffer over-allocation to that effect.
- const __m128i a0 = _mm_loadu_si128((__m128i*)&a[BPS * 0]);
- const __m128i a1 = _mm_loadu_si128((__m128i*)&a[BPS * 1]);
- const __m128i a2 = _mm_loadu_si128((__m128i*)&a[BPS * 2]);
- const __m128i a3 = _mm_loadu_si128((__m128i*)&a[BPS * 3]);
- const __m128i b0 = _mm_loadu_si128((__m128i*)&b[BPS * 0]);
- const __m128i b1 = _mm_loadu_si128((__m128i*)&b[BPS * 1]);
- const __m128i b2 = _mm_loadu_si128((__m128i*)&b[BPS * 2]);
- const __m128i b3 = _mm_loadu_si128((__m128i*)&b[BPS * 3]);
-
- // compute clip0(a-b) and clip0(b-a)
- const __m128i a0p = _mm_subs_epu8(a0, b0);
- const __m128i a0m = _mm_subs_epu8(b0, a0);
- const __m128i a1p = _mm_subs_epu8(a1, b1);
- const __m128i a1m = _mm_subs_epu8(b1, a1);
- const __m128i a2p = _mm_subs_epu8(a2, b2);
- const __m128i a2m = _mm_subs_epu8(b2, a2);
- const __m128i a3p = _mm_subs_epu8(a3, b3);
- const __m128i a3m = _mm_subs_epu8(b3, a3);
-
- // compute |a-b| with 8b arithmetic as clip0(a-b) | clip0(b-a)
- const __m128i diff0 = _mm_or_si128(a0p, a0m);
- const __m128i diff1 = _mm_or_si128(a1p, a1m);
- const __m128i diff2 = _mm_or_si128(a2p, a2m);
- const __m128i diff3 = _mm_or_si128(a3p, a3m);
-
- // unpack (only four operations, instead of eight)
- const __m128i low0 = _mm_unpacklo_epi8(diff0, zero);
- const __m128i low1 = _mm_unpacklo_epi8(diff1, zero);
- const __m128i low2 = _mm_unpacklo_epi8(diff2, zero);
- const __m128i low3 = _mm_unpacklo_epi8(diff3, zero);
-
- // multiply with self
- const __m128i low_madd0 = _mm_madd_epi16(low0, low0);
- const __m128i low_madd1 = _mm_madd_epi16(low1, low1);
- const __m128i low_madd2 = _mm_madd_epi16(low2, low2);
- const __m128i low_madd3 = _mm_madd_epi16(low3, low3);
-
- // collect in a cascading way
- const __m128i low_sum0 = _mm_add_epi32(low_madd0, low_madd1);
- const __m128i low_sum1 = _mm_add_epi32(low_madd2, low_madd3);
- sum1 = _mm_add_epi32(sum1, low_sum0);
- sum2 = _mm_add_epi32(sum2, low_sum1);
-
- if (do_16) { // if necessary, process the higher 8 bytes similarly
- const __m128i hi0 = _mm_unpackhi_epi8(diff0, zero);
- const __m128i hi1 = _mm_unpackhi_epi8(diff1, zero);
- const __m128i hi2 = _mm_unpackhi_epi8(diff2, zero);
- const __m128i hi3 = _mm_unpackhi_epi8(diff3, zero);
-
- const __m128i hi_madd0 = _mm_madd_epi16(hi0, hi0);
- const __m128i hi_madd1 = _mm_madd_epi16(hi1, hi1);
- const __m128i hi_madd2 = _mm_madd_epi16(hi2, hi2);
- const __m128i hi_madd3 = _mm_madd_epi16(hi3, hi3);
- const __m128i hi_sum0 = _mm_add_epi32(hi_madd0, hi_madd1);
- const __m128i hi_sum1 = _mm_add_epi32(hi_madd2, hi_madd3);
- sum1 = _mm_add_epi32(sum1, hi_sum0);
- sum2 = _mm_add_epi32(sum2, hi_sum1);
- }
- a += 4 * BPS;
- b += 4 * BPS;
- }
- {
- int32_t tmp[4];
- const __m128i sum = _mm_add_epi32(sum1, sum2);
- _mm_storeu_si128((__m128i*)tmp, sum);
- return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
- }
-}
-
-static int SSE16x16SSE2(const uint8_t* a, const uint8_t* b) {
- return SSE_Nx4SSE2(a, b, 4, 1);
-}
-
-static int SSE16x8SSE2(const uint8_t* a, const uint8_t* b) {
- return SSE_Nx4SSE2(a, b, 2, 1);
-}
-
-static int SSE8x8SSE2(const uint8_t* a, const uint8_t* b) {
- return SSE_Nx4SSE2(a, b, 2, 0);
-}
-
static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
- const __m128i zero = _mm_setzero_si128();
+ const __m128i zero = _mm_set1_epi16(0);
- // Load values. Note that we read 8 pixels instead of 4,
- // but the a/b buffers are over-allocated to that effect.
+ // Load values.
const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]);
const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]);
const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]);
@@ -588,7 +483,6 @@ static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
const __m128i sum0 = _mm_add_epi32(madd0, madd1);
const __m128i sum1 = _mm_add_epi32(madd2, madd3);
const __m128i sum2 = _mm_add_epi32(sum0, sum1);
-
int32_t tmp[4];
_mm_storeu_si128((__m128i*)tmp, sum2);
return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
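As a scalar reference, SSE4x4SSE2 computes the sum of squared differences over a 4x4 block whose rows are BPS bytes apart (a sketch, not the tree's exact C fallback; BPS is the row-stride constant from dsp.h):

    #include <stdint.h>
    static int SSE4x4_C(const uint8_t* a, const uint8_t* b) {
      int sum = 0;
      int x, y;
      for (y = 0; y < 4; ++y) {
        for (x = 0; x < 4; ++x) {
          const int diff = (int)a[x + y * BPS] - (int)b[x + y * BPS];
          sum += diff * diff;  /* accumulated via _mm_madd_epi16 above */
        }
      }
      return sum;
    }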
@@ -608,6 +502,8 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
int32_t sum[4];
__m128i tmp_0, tmp_1, tmp_2, tmp_3;
const __m128i zero = _mm_setzero_si128();
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i three = _mm_set1_epi16(3);
  // Load, combine and transpose inputs.
{
@@ -654,14 +550,17 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
// Horizontal pass and subsequent transpose.
{
// Calculate a and b (two 4x4 at once).
- const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
- const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
- const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
- const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
- const __m128i b0 = _mm_add_epi16(a0, a1);
+ const __m128i a0 = _mm_slli_epi16(_mm_add_epi16(tmp_0, tmp_2), 2);
+ const __m128i a1 = _mm_slli_epi16(_mm_add_epi16(tmp_1, tmp_3), 2);
+ const __m128i a2 = _mm_slli_epi16(_mm_sub_epi16(tmp_1, tmp_3), 2);
+ const __m128i a3 = _mm_slli_epi16(_mm_sub_epi16(tmp_0, tmp_2), 2);
+ // b0_extra = (a0 != 0);
+ const __m128i b0_extra = _mm_andnot_si128(_mm_cmpeq_epi16(a0, zero), one);
+ const __m128i b0_base = _mm_add_epi16(a0, a1);
const __m128i b1 = _mm_add_epi16(a3, a2);
const __m128i b2 = _mm_sub_epi16(a3, a2);
const __m128i b3 = _mm_sub_epi16(a0, a1);
+ const __m128i b0 = _mm_add_epi16(b0_base, b0_extra);
// a00 a01 a02 a03 b00 b01 b02 b03
// a10 a11 a12 a13 b10 b11 b12 b13
// a20 a21 a22 a23 b20 b21 b22 b23
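The b0_extra line above is a standard SSE2 idiom for evaluating a per-lane (v != 0) as 0/1; in isolation:

    #include <emmintrin.h>
    static __m128i NotZero16(const __m128i v) {
      const __m128i zero = _mm_setzero_si128();
      const __m128i one = _mm_set1_epi16(1);
      /* cmpeq yields 0xffff where v == 0; andnot then keeps `one`
         only in the remaining (non-zero) lanes. */
      return _mm_andnot_si128(_mm_cmpeq_epi16(v, zero), one);
    }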
@@ -736,6 +635,19 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
B_b2 = _mm_sub_epi16(B_b2, sign_B_b2);
}
+ // b = abs(b) + 3
+ A_b0 = _mm_add_epi16(A_b0, three);
+ A_b2 = _mm_add_epi16(A_b2, three);
+ B_b0 = _mm_add_epi16(B_b0, three);
+ B_b2 = _mm_add_epi16(B_b2, three);
+
+ // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
+ // b = (abs(b) + 3) >> 3
+ A_b0 = _mm_srai_epi16(A_b0, 3);
+ A_b2 = _mm_srai_epi16(A_b2, 3);
+ B_b0 = _mm_srai_epi16(B_b0, 3);
+ B_b2 = _mm_srai_epi16(B_b2, 3);
+
// weighted sums
A_b0 = _mm_madd_epi16(A_b0, w_0);
A_b2 = _mm_madd_epi16(A_b2, w_8);
@@ -754,7 +666,7 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int diff_sum = TTransformSSE2(a, b, w);
- return abs(diff_sum) >> 5;
+ return (abs(diff_sum) + 8) >> 4;
}
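Combined with the (abs(b) + 3) >> 3 pre-scaling added to TTransformSSE2 above, the final normalization becomes a rounded >> 4 rather than the former >> 5. In scalar form (a sketch, names hypothetical):

    #include <stdlib.h>
    /* per-coefficient scaling now done inside TTransformSSE2: */
    static int ScaleCoeff(int b) { return (abs(b) + 3) >> 3; }
    /* final rounding in Disto4x4SSE2: */
    static int DistoFinal(int diff_sum) { return (abs(diff_sum) + 8) >> 4; }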
static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
@@ -769,6 +681,7 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
return D;
}
+
//------------------------------------------------------------------------------
// Quantization
//
@@ -777,7 +690,8 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
int n, const VP8Matrix* const mtx) {
const __m128i max_coeff_2047 = _mm_set1_epi16(2047);
- const __m128i zero = _mm_setzero_si128();
+ const __m128i zero = _mm_set1_epi16(0);
+ __m128i sign0, sign8;
__m128i coeff0, coeff8;
__m128i out0, out8;
__m128i packed_out;
@@ -799,8 +713,8 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]);
// sign(in) = in >> 15 (0x0000 if positive, 0xffff if negative)
- const __m128i sign0 = _mm_srai_epi16(in0, 15);
- const __m128i sign8 = _mm_srai_epi16(in8, 15);
+ sign0 = _mm_srai_epi16(in0, 15);
+ sign8 = _mm_srai_epi16(in8, 15);
// coeff = abs(in) = (in ^ sign) - sign
coeff0 = _mm_xor_si128(in0, sign0);
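The sign/xor/sub sequence above is the usual branchless 16-bit abs; one lane in scalar form (sketch):

    #include <stdint.h>
    static int16_t Abs16(const int16_t in) {
      const int16_t sign = in >> 15;         /* 0x0000 or 0xffff */
      return (int16_t)((in ^ sign) - sign);  /* abs(in) for in > INT16_MIN */
    }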
@@ -905,28 +819,19 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
}
}
-#endif // WEBP_USE_SSE2
-
-//------------------------------------------------------------------------------
-// Entry point
-
extern void VP8EncDspInitSSE2(void);
-
void VP8EncDspInitSSE2(void) {
-#if defined(WEBP_USE_SSE2)
VP8CollectHistogram = CollectHistogramSSE2;
VP8EncQuantizeBlock = QuantizeBlockSSE2;
VP8ITransform = ITransformSSE2;
VP8FTransform = FTransformSSE2;
- VP8SSE16x16 = SSE16x16SSE2;
- VP8SSE16x8 = SSE16x8SSE2;
- VP8SSE8x8 = SSE8x8SSE2;
VP8SSE4x4 = SSE4x4SSE2;
VP8TDisto4x4 = Disto4x4SSE2;
VP8TDisto16x16 = Disto16x16SSE2;
-#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
+
+#endif // WEBP_USE_SSE2
diff --git a/src/dsp/lossless.c b/src/dsp/lossless.c
index f951b897..6d3094fd 100644
--- a/src/dsp/lossless.c
+++ b/src/dsp/lossless.c
@@ -11,6 +11,8 @@
// Jyrki Alakuijala (jyrki@google.com)
// Urvang Joshi (urvang@google.com)
+#define ANDROID_WEBP_RGB
+
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
@@ -21,6 +23,7 @@ extern "C" {
#include "../dec/vp8li.h"
#include "../dsp/yuv.h"
#include "../dsp/dsp.h"
+#include "../enc/histogram.h"
#define MAX_DIFF_COST (1e30f)
@@ -1033,14 +1036,12 @@ static void ConvertBGRAToRGBA4444(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
- const uint8_t rg = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
- const uint8_t ba = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
-#ifdef WEBP_SWAP_16BIT_CSP
- *dst++ = ba;
- *dst++ = rg;
+#ifdef ANDROID_WEBP_RGB
+ *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
+ *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
#else
- *dst++ = rg;
- *dst++ = ba;
+ *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
+ *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
#endif
}
}
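Both branches above perform the same nibble packing; ANDROID_WEBP_RGB only swaps the order of the two output bytes. A scalar sketch (hypothetical helper):

    #include <stdint.h>
    static void PackRGBA4444(uint32_t argb, uint8_t out[2]) {
      const uint8_t rg = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);  /* r|g */
      const uint8_t ba = ((argb >>  0) & 0xf0) | ((argb >> 28) & 0xf);  /* b|a */
      out[0] = rg;  /* non-Android order; the Android branch stores ba first */
      out[1] = ba;
    }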
@@ -1050,14 +1051,12 @@ static void ConvertBGRAToRGB565(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
- const uint8_t rg = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
- const uint8_t gb = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
-#ifdef WEBP_SWAP_16BIT_CSP
- *dst++ = gb;
- *dst++ = rg;
+#ifdef ANDROID_WEBP_RGB
+ *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
+ *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
#else
- *dst++ = rg;
- *dst++ = gb;
+ *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
+ *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
#endif
}
}
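Likewise for RGB565, where again only the byte order differs between the two branches (hypothetical helper):

    #include <stdint.h>
    static void PackRGB565(uint32_t argb, uint8_t out[2]) {
      out[0] = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);   /* rrrrrggg */
      out[1] = ((argb >>  5) & 0xe0) | ((argb >>  3) & 0x1f);  /* gggbbbbb */
    }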
diff --git a/src/dsp/upsampling.c b/src/dsp/upsampling.c
index 91d939cd..4855eb14 100644
--- a/src/dsp/upsampling.c
+++ b/src/dsp/upsampling.c
@@ -328,11 +328,6 @@ void WebPInitUpsamplers(void) {
WebPInitUpsamplersSSE2();
}
#endif
-#if defined(WEBP_USE_NEON)
- if (VP8GetCPUInfo(kNEON)) {
- WebPInitUpsamplersNEON();
- }
-#endif
}
#endif // FANCY_UPSAMPLING
}
@@ -353,11 +348,6 @@ void WebPInitPremultiply(void) {
WebPInitPremultiplySSE2();
}
#endif
-#if defined(WEBP_USE_NEON)
- if (VP8GetCPUInfo(kNEON)) {
- WebPInitPremultiplyNEON();
- }
-#endif
}
#endif // FANCY_UPSAMPLING
}
diff --git a/src/dsp/upsampling_neon.c b/src/dsp/upsampling_neon.c
deleted file mode 100644
index 00e2f892..00000000
--- a/src/dsp/upsampling_neon.c
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// This code is licensed under the same terms as WebM:
-// Software License Agreement: http://www.webmproject.org/license/software/
-// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
-// -----------------------------------------------------------------------------
-//
-// NEON version of YUV to RGB upsampling functions.
-//
-// Author: mans@mansr.com (Mans Rullgard)
-// Based on SSE code by: somnath@google.com (Somnath Banerjee)
-
-#include "./dsp.h"
-
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
-#if defined(WEBP_USE_NEON)
-
-#include <assert.h>
-#include <arm_neon.h>
-#include <string.h>
-#include "./yuv.h"
-
-#ifdef FANCY_UPSAMPLING
-
-// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
-#define UPSAMPLE_16PIXELS(r1, r2, out) { \
- uint8x8_t a = vld1_u8(r1); \
- uint8x8_t b = vld1_u8(r1 + 1); \
- uint8x8_t c = vld1_u8(r2); \
- uint8x8_t d = vld1_u8(r2 + 1); \
- \
- uint16x8_t al = vshll_n_u8(a, 1); \
- uint16x8_t bl = vshll_n_u8(b, 1); \
- uint16x8_t cl = vshll_n_u8(c, 1); \
- uint16x8_t dl = vshll_n_u8(d, 1); \
- \
- uint8x8_t diag1, diag2; \
- uint16x8_t sl; \
- \
- /* a + b + c + d */ \
- sl = vaddl_u8(a, b); \
- sl = vaddw_u8(sl, c); \
- sl = vaddw_u8(sl, d); \
- \
- al = vaddq_u16(sl, al); /* 3a + b + c + d */ \
- bl = vaddq_u16(sl, bl); /* a + 3b + c + d */ \
- \
- al = vaddq_u16(al, dl); /* 3a + b + c + 3d */ \
- bl = vaddq_u16(bl, cl); /* a + 3b + 3c + d */ \
- \
- diag2 = vshrn_n_u16(al, 3); \
- diag1 = vshrn_n_u16(bl, 3); \
- \
- a = vrhadd_u8(a, diag1); \
- b = vrhadd_u8(b, diag2); \
- c = vrhadd_u8(c, diag2); \
- d = vrhadd_u8(d, diag1); \
- \
- { \
- const uint8x8x2_t a_b = {{ a, b }}; \
- const uint8x8x2_t c_d = {{ c, d }}; \
- vst2_u8(out, a_b); \
- vst2_u8(out + 32, c_d); \
- } \
-}
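In scalar terms, the macro blends each source pixel with the (1,3,3,1)-weighted diagonal of its 2x2 neighbourhood, i.e. roughly (9a + 3b + 3c + d + 8) / 16 per output pixel (a sketch of one lane):

    #include <stdint.h>
    static uint8_t FancyBlend(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
      const uint8_t diag1 = (uint8_t)((a + 3 * b + 3 * c + d) >> 3);  /* vshrn */
      return (uint8_t)((a + diag1 + 1) >> 1);                         /* vrhadd */
    }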
-
-// Turn the macro into a function to reduce code size where speed is non-critical
-static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
- uint8_t *out) {
- UPSAMPLE_16PIXELS(r1, r2, out);
-}
-
-#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
- uint8_t r1[9], r2[9]; \
- memcpy(r1, (tb), (num_pixels)); \
- memcpy(r2, (bb), (num_pixels)); \
- /* replicate last byte */ \
- memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
- memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
- Upsample16Pixels(r1, r2, out); \
-}
-
-#define CY 76283
-#define CVR 89858
-#define CUG 22014
-#define CVG 45773
-#define CUB 113618
-
-static const int16_t coef[4] = { CVR / 4, CUG, CVG / 2, CUB / 4 };
-
-#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
- int i; \
- for (i = 0; i < N; i += 8) { \
- int off = ((cur_x) + i) * XSTEP; \
- uint8x8_t y = vld1_u8(src_y + (cur_x) + i); \
- uint8x8_t u = vld1_u8((src_uv) + i); \
- uint8x8_t v = vld1_u8((src_uv) + i + 16); \
- int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
- int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
- int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
- \
- int16x8_t ud = vshlq_n_s16(uu, 1); \
- int16x8_t vd = vshlq_n_s16(vv, 1); \
- \
- int32x4_t vrl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(vv), 1), \
- vget_low_s16(vd), cf16, 0); \
- int32x4_t vrh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(vv), 1), \
- vget_high_s16(vd), cf16, 0); \
- int16x8_t vr = vcombine_s16(vrshrn_n_s32(vrl, 16), \
- vrshrn_n_s32(vrh, 16)); \
- \
- int32x4_t vl = vmovl_s16(vget_low_s16(vv)); \
- int32x4_t vh = vmovl_s16(vget_high_s16(vv)); \
- int32x4_t ugl = vmlal_lane_s16(vl, vget_low_s16(uu), cf16, 1); \
- int32x4_t ugh = vmlal_lane_s16(vh, vget_high_s16(uu), cf16, 1); \
- int32x4_t gcl = vqdmlal_lane_s16(ugl, vget_low_s16(vv), cf16, 2); \
- int32x4_t gch = vqdmlal_lane_s16(ugh, vget_high_s16(vv), cf16, 2); \
- int16x8_t gc = vcombine_s16(vrshrn_n_s32(gcl, 16), \
- vrshrn_n_s32(gch, 16)); \
- \
- int32x4_t ubl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(uu), 1), \
- vget_low_s16(ud), cf16, 3); \
- int32x4_t ubh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(uu), 1), \
- vget_high_s16(ud), cf16, 3); \
- int16x8_t ub = vcombine_s16(vrshrn_n_s32(ubl, 16), \
- vrshrn_n_s32(ubh, 16)); \
- \
- int32x4_t rl = vaddl_s16(vget_low_s16(yy), vget_low_s16(vr)); \
- int32x4_t rh = vaddl_s16(vget_high_s16(yy), vget_high_s16(vr)); \
- int32x4_t gl = vsubl_s16(vget_low_s16(yy), vget_low_s16(gc)); \
- int32x4_t gh = vsubl_s16(vget_high_s16(yy), vget_high_s16(gc)); \
- int32x4_t bl = vaddl_s16(vget_low_s16(yy), vget_low_s16(ub)); \
- int32x4_t bh = vaddl_s16(vget_high_s16(yy), vget_high_s16(ub)); \
- \
- rl = vmulq_lane_s32(rl, cf32, 0); \
- rh = vmulq_lane_s32(rh, cf32, 0); \
- gl = vmulq_lane_s32(gl, cf32, 0); \
- gh = vmulq_lane_s32(gh, cf32, 0); \
- bl = vmulq_lane_s32(bl, cf32, 0); \
- bh = vmulq_lane_s32(bh, cf32, 0); \
- \
- y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, 16), \
- vrshrn_n_s32(rh, 16))); \
- u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, 16), \
- vrshrn_n_s32(gh, 16))); \
- v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(bl, 16), \
- vrshrn_n_s32(bh, 16))); \
- STR_ ## FMT(out + off, y, u, v); \
- } \
-}
-
-#define v255 vmov_n_u8(255)
-
-#define STR_Rgb(out, r, g, b) do { \
- const uint8x8x3_t r_g_b = {{ r, g, b }}; \
- vst3_u8(out, r_g_b); \
-} while (0)
-
-#define STR_Bgr(out, r, g, b) do { \
- const uint8x8x3_t b_g_r = {{ b, g, r }}; \
- vst3_u8(out, b_g_r); \
-} while (0)
-
-#define STR_Rgba(out, r, g, b) do { \
- const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }}; \
- vst4_u8(out, r_g_b_v255); \
-} while (0)
-
-#define STR_Bgra(out, r, g, b) do { \
- const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }}; \
- vst4_u8(out, b_g_r_v255); \
-} while (0)
-
-#define CONVERT1(FMT, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
- int i; \
- for (i = 0; i < N; i++) { \
- int off = ((cur_x) + i) * XSTEP; \
- int y = src_y[(cur_x) + i]; \
- int u = (src_uv)[i]; \
- int v = (src_uv)[i + 16]; \
- VP8YuvTo ## FMT(y, u, v, rgb + off); \
- } \
-}
-
-#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
- top_dst, bottom_dst, cur_x, len) { \
- if (top_y) { \
- CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
- } \
- if (bottom_y) { \
- CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x) \
- } \
-}
-
-#define CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, uv, \
- top_dst, bottom_dst, cur_x, len) { \
- if (top_y) { \
- CONVERT1(FMT, XSTEP, len, top_y, uv, top_dst, cur_x); \
- } \
- if (bottom_y) { \
- CONVERT1(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
- } \
-}
-
-#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
-static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
- const uint8_t *top_u, const uint8_t *top_v, \
- const uint8_t *cur_u, const uint8_t *cur_v, \
- uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
- int block; \
- /* 16 byte aligned array to cache reconstructed u and v */ \
- uint8_t uv_buf[2 * 32 + 15]; \
- uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
- const int uv_len = (len + 1) >> 1; \
- /* 9 pixels must be readable for each block */ \
- const int num_blocks = (uv_len - 1) >> 3; \
- const int leftover = uv_len - num_blocks * 8; \
- const int last_pos = 1 + 16 * num_blocks; \
- \
- const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
- const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
- \
- const int16x4_t cf16 = vld1_s16(coef); \
- const int32x2_t cf32 = vmov_n_s32(CY); \
- const uint8x8_t u16 = vmov_n_u8(16); \
- const uint8x8_t u128 = vmov_n_u8(128); \
- \
- /* Treat the first pixel the regular way */ \
- if (top_y) { \
- const int u0 = (top_u[0] + u_diag) >> 1; \
- const int v0 = (top_v[0] + v_diag) >> 1; \
- VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
- } \
- if (bottom_y) { \
- const int u0 = (cur_u[0] + u_diag) >> 1; \
- const int v0 = (cur_v[0] + v_diag) >> 1; \
- VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
- } \
- \
- for (block = 0; block < num_blocks; ++block) { \
- UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
- UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
- CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
- top_dst, bottom_dst, 16 * block + 1, 16); \
- top_u += 8; \
- cur_u += 8; \
- top_v += 8; \
- cur_v += 8; \
- } \
- \
- UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
- UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
- CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, r_uv, \
- top_dst, bottom_dst, last_pos, len - last_pos); \
-}
-
-// NEON variants of the fancy upsampler.
-NEON_UPSAMPLE_FUNC(UpsampleRgbLinePairNEON, Rgb, 3)
-NEON_UPSAMPLE_FUNC(UpsampleBgrLinePairNEON, Bgr, 3)
-NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePairNEON, Rgba, 4)
-NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4)
-
-#endif // FANCY_UPSAMPLING
-
-#endif // WEBP_USE_NEON
-
-//------------------------------------------------------------------------------
-
-extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
-
-void WebPInitUpsamplersNEON(void) {
-#if defined(WEBP_USE_NEON)
- WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairNEON;
- WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairNEON;
- WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairNEON;
- WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairNEON;
-#endif // WEBP_USE_NEON
-}
-
-void WebPInitPremultiplyNEON(void) {
-#if defined(WEBP_USE_NEON)
- WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairNEON;
- WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairNEON;
-#endif // WEBP_USE_NEON
-}
-
-#if defined(__cplusplus) || defined(c_plusplus)
-} // extern "C"
-#endif
diff --git a/src/dsp/upsampling_sse2.c b/src/dsp/upsampling_sse2.c
index ba075d11..8cb275a0 100644
--- a/src/dsp/upsampling_sse2.c
+++ b/src/dsp/upsampling_sse2.c
@@ -11,10 +11,6 @@
#include "./dsp.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
#if defined(WEBP_USE_SSE2)
#include <assert.h>
@@ -22,6 +18,10 @@ extern "C" {
#include <string.h>
#include "./yuv.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#ifdef FANCY_UPSAMPLING
// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
@@ -51,12 +51,12 @@ extern "C" {
// pack and store two alternating pixel rows
#define PACK_AND_STORE(a, b, da, db, out) do { \
- const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
- const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
- const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
- const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
- _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
- _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
+ const __m128i ta = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
+ const __m128i tb = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
+ const __m128i t1 = _mm_unpacklo_epi8(ta, tb); \
+ const __m128i t2 = _mm_unpackhi_epi8(ta, tb); \
+ _mm_store_si128(((__m128i*)(out)) + 0, t1); \
+ _mm_store_si128(((__m128i*)(out)) + 1, t2); \
} while (0)
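_mm_avg_epu8 is a rounding average per byte; chaining two of them is what yields the (9a + 3b + 3c + d + 8) / 16 weighting quoted in the comments. One lane in scalar form:

    #include <stdint.h>
    static uint8_t AvgU8(uint8_t x, uint8_t y) {
      return (uint8_t)((x + y + 1) >> 1);  /* == _mm_avg_epu8 per lane */
    }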
// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
@@ -128,7 +128,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const uint8_t* top_u, const uint8_t* top_v, \
const uint8_t* cur_u, const uint8_t* cur_v, \
uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
- int block; \
+ int b; \
/* 16 byte aligned array to cache reconstructed u and v */ \
uint8_t uv_buf[4 * 32 + 15]; \
uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
@@ -154,11 +154,11 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
FUNC(bottom_y[0], u0, v0, bottom_dst); \
} \
\
- for (block = 0; block < num_blocks; ++block) { \
+ for (b = 0; b < num_blocks; ++b) { \
UPSAMPLE_32PIXELS(top_u, cur_u, r_uv + 0 * 32); \
UPSAMPLE_32PIXELS(top_v, cur_v, r_uv + 1 * 32); \
CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \
- 32 * block + 1, 32) \
+ 32 * b + 1, 32) \
top_u += 16; \
cur_u += 16; \
top_v += 16; \
@@ -184,32 +184,26 @@ SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4)
#undef CONVERT2RGB
#undef SSE2_UPSAMPLE_FUNC
-#endif // FANCY_UPSAMPLING
-
-#endif // WEBP_USE_SSE2
-
//------------------------------------------------------------------------------
extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
void WebPInitUpsamplersSSE2(void) {
-#if defined(WEBP_USE_SSE2)
WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairSSE2;
WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairSSE2;
WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairSSE2;
-#endif // WEBP_USE_SSE2
}
void WebPInitPremultiplySSE2(void) {
-#if defined(WEBP_USE_SSE2)
WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairSSE2;
-#endif // WEBP_USE_SSE2
}
+#endif // FANCY_UPSAMPLING
+
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-
+#endif // WEBP_USE_SSE2
diff --git a/src/dsp/yuv.c b/src/dsp/yuv.c
index 38895281..7f05f9a3 100644
--- a/src/dsp/yuv.c
+++ b/src/dsp/yuv.c
@@ -33,7 +33,6 @@ void VP8YUVInit(void) {
if (done) {
return;
}
-#ifndef USE_YUVj
for (i = 0; i < 256; ++i) {
VP8kVToR[i] = (89858 * (i - 128) + YUV_HALF) >> YUV_FIX;
VP8kUToG[i] = -22014 * (i - 128) + YUV_HALF;
@@ -45,20 +44,6 @@ void VP8YUVInit(void) {
VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
}
-#else
- for (i = 0; i < 256; ++i) {
- VP8kVToR[i] = (91881 * (i - 128) + YUV_HALF) >> YUV_FIX;
- VP8kUToG[i] = -22554 * (i - 128) + YUV_HALF;
- VP8kVToG[i] = -46802 * (i - 128);
- VP8kUToB[i] = (116130 * (i - 128) + YUV_HALF) >> YUV_FIX;
- }
- for (i = YUV_RANGE_MIN; i < YUV_RANGE_MAX; ++i) {
- const int k = i;
- VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
- VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
- }
-#endif
-
done = 1;
}
diff --git a/src/dsp/yuv.h b/src/dsp/yuv.h
index add167ea..ee3587e3 100644
--- a/src/dsp/yuv.h
+++ b/src/dsp/yuv.h
@@ -7,25 +7,6 @@
//
// inline YUV<->RGB conversion function
//
-// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
-// More information at: http://en.wikipedia.org/wiki/YCbCr
-// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
-// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
-// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
-// We use 16bit fixed point operations for RGB->YUV conversion.
-//
-// For the Y'CbCr to RGB conversion, the BT.601 specification reads:
-// R = 1.164 * (Y-16) + 1.596 * (V-128)
-// G = 1.164 * (Y-16) - 0.813 * (V-128) - 0.391 * (U-128)
-// B = 1.164 * (Y-16) + 2.018 * (U-128)
-// where Y is in the [16,235] range, and U/V in the [16,240] range.
-// But the common term 1.164 * (Y-16) can be handled as an offset in the
-// VP8kClip[] table. So the formulae should be read as:
-// R = 1.164 * [Y + 1.371 * (V-128) ] - 18.624
-// G = 1.164 * [Y - 0.698 * (V-128) - 0.336 * (U-128)] - 18.624
-// B = 1.164 * [Y + 1.733 * (U-128)] - 18.624
-// once factorized. Here too, 16bit fixed precision is used.
-//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DSP_YUV_H_
@@ -33,15 +14,13 @@
#include "../dec/decode_vp8.h"
-#if defined(WEBP_EXPERIMENTAL_FEATURES)
-// Do NOT activate this feature for real compression. This is only experimental!
-// This flag is for comparison purpose against JPEG's "YUVj" natural colorspace.
-// This colorspace is close to Rec.601's Y'CbCr model with the notable
-// difference of allowing larger range for luma/chroma.
-// See http://en.wikipedia.org/wiki/YCbCr#JPEG_conversion paragraph, and its
-// difference with http://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
-// #define USE_YUVj
-#endif
+/*
+ * Define ANDROID_WEBP_RGB to enable Android-specific byte ordering for
+ * RGBA_4444 & RGB_565 color output.
+ */
+
+#define ANDROID_WEBP_RGB
//------------------------------------------------------------------------------
// YUV -> RGB conversion
@@ -74,16 +53,16 @@ static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
- const uint8_t rg = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
- (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
- const uint8_t gb = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
- (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
-#ifdef WEBP_SWAP_16BIT_CSP
- rgb[0] = gb;
- rgb[1] = rg;
+#ifdef ANDROID_WEBP_RGB
+ rgb[1] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
+ (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
+ rgb[0] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
+ (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
#else
- rgb[0] = rg;
- rgb[1] = gb;
+ rgb[0] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
+ (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
+ rgb[1] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
+ (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
#endif
}
@@ -98,15 +77,14 @@ static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
- const uint8_t rg = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
- VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
- const uint8_t ba = (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4) | 0x0f;
-#ifdef WEBP_SWAP_16BIT_CSP
- argb[0] = ba;
- argb[1] = rg;
+#ifdef ANDROID_WEBP_RGB
+ argb[1] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
+ VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
+ argb[0] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4);
#else
- argb[0] = rg;
- argb[1] = ba;
+ argb[0] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
+ VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
+ argb[1] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4);
#endif
}
@@ -137,14 +115,18 @@ void VP8YUVInit(void);
//------------------------------------------------------------------------------
// RGB -> YUV conversion
+// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
+// More information at: http://en.wikipedia.org/wiki/YCbCr
+// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
+// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
+// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
+// We use 16bit fixed point operations.
static WEBP_INLINE int VP8ClipUV(int v) {
- v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
- return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
+ v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
+ return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
}
-#ifndef USE_YUVj
-
static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
const int luma = 16839 * r + 33059 * g + 6420 * b;
@@ -152,38 +134,13 @@ static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
}
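A quick numeric check of VP8RGBToY (assuming YUV_FIX == 16, as elsewhere in this tree): pure white lands exactly on the BT.601 video-range maximum.

    /* r = g = b = 255:
     *   luma   = (16839 + 33059 + 6420) * 255 = 14361090
     *   kRound = (1 << 15) + (16 << 16)       = 1081344
     *   Y      = (14361090 + 1081344) >> 16   = 235
     */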
static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
- const int u = -9719 * r - 19081 * g + 28800 * b;
- return VP8ClipUV(u);
-}
-
-static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
- const int v = +28800 * r - 24116 * g - 4684 * b;
- return VP8ClipUV(v);
-}
-
-#else
-
-// This JPEG-YUV colorspace, only for comparison!
-// These are also 16-bit precision coefficients from Rec.601, but with full
-// [0..255] output range.
-static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
- const int kRound = (1 << (YUV_FIX - 1));
- const int luma = 19595 * r + 38470 * g + 7471 * b;
- return (luma + kRound) >> YUV_FIX; // no need to clip
-}
-
-static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
- const int u = -11058 * r - 21710 * g + 32768 * b;
- return VP8ClipUV(u);
+ return VP8ClipUV(-9719 * r - 19081 * g + 28800 * b);
}
static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
- const int v = 32768 * r - 27439 * g - 5329 * b;
- return VP8ClipUV(v);
+ return VP8ClipUV(+28800 * r - 24116 * g - 4684 * b);
}
-#endif // USE_YUVj
-
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif