author     Vikas Arora <vikasa@google.com>  2013-03-13 16:43:18 -0700
committer  Vikas Arora <vikasa@google.com>  2013-03-14 10:15:22 -0700
commit     1e7bf8805bd030c19924a5306837ecd72c295751 (patch)
tree       20a7189518b1824f7805016e9718e26e9c3a6668 /src/dsp
parent     b6dbce6bfeaabde2a7b581c4c6888d532d32f3ac (diff)
download   webp-tools_r22.2.tar.gz

Added 16bit swapping of RGB565 / RGB4444 colorspace.
Added ARM/NEON code for decoder/encoder modules.
Speedup in WebP compression (method 3 and above).

Change-Id: I95a697338bef7c3ea08054eb5f850a97d1889eb9

Diffstat (limited to 'src/dsp')
-rw-r--r--  src/dsp/cpu-features.c      396
-rw-r--r--  src/dsp/cpu-features.h       56
-rw-r--r--  src/dsp/cpu.c                28
-rw-r--r--  src/dsp/dec.c                15
-rw-r--r--  src/dsp/dec_neon.c           88
-rw-r--r--  src/dsp/dec_sse2.c           37
-rw-r--r--  src/dsp/dsp.h                25
-rw-r--r--  src/dsp/enc.c               117
-rw-r--r--  src/dsp/enc_neon.c          661
-rw-r--r--  src/dsp/enc_sse2.c          321
-rw-r--r--  src/dsp/lossless.c          354
-rw-r--r--  src/dsp/lossless.h           16
-rw-r--r--  src/dsp/upsampling.c         10
-rw-r--r--  src/dsp/upsampling_neon.c   292
-rw-r--r--  src/dsp/upsampling_sse2.c    38
-rw-r--r--  src/dsp/yuv.c                15
-rw-r--r--  src/dsp/yuv.h               109
17 files changed, 2209 insertions(+), 369 deletions(-)
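
Note: the 16-bit RGB565/RGB4444 byte swapping named in the commit message lands in files whose hunks are not shown in this excerpt. As a minimal illustrative sketch (not code from this patch), the per-pixel swap amounts to:

    #include <stdint.h>

    /* Illustrative only: swap the two bytes of a 16-bit RGB565/RGB4444 pixel. */
    static uint16_t Swap16(uint16_t v) {
      return (uint16_t)((v >> 8) | (v << 8));
    }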
diff --git a/src/dsp/cpu-features.c b/src/dsp/cpu-features.c
new file mode 100644
index 00000000..6a5cd8f1
--- /dev/null
+++ b/src/dsp/cpu-features.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <sys/system_properties.h>
+#ifdef __arm__
+#include <machine/cpu-features.h>
+#endif
+#include <pthread.h>
+#include "cpu-features.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>  /* memmem(), strlen(), memcpy() */
+#include <unistd.h>  /* read(), close() */
+#include <fcntl.h>
+#include <errno.h>
+
+static pthread_once_t g_once = PTHREAD_ONCE_INIT;
+static AndroidCpuFamily g_cpuFamily;
+static uint64_t g_cpuFeatures;
+static int g_cpuCount;
+
+static const int android_cpufeatures_debug = 0;
+
+#ifdef __arm__
+# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_ARM
+#elif defined __i386__
+# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_X86
+#else
+# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_UNKNOWN
+#endif
+
+#define D(...) \
+ do { \
+ if (android_cpufeatures_debug) { \
+ printf(__VA_ARGS__); fflush(stdout); \
+ } \
+ } while (0)
+
+#ifdef __i386__
+static __inline__ void x86_cpuid(int func, int values[4])
+{
+ int a, b, c, d;
+ /* We need to preserve ebx since we're compiling PIC code */
+ /* this means we can't use "=b" for the second output register */
+    __asm__ __volatile__ (
+      "push %%ebx\n"
+      "cpuid\n"
+      "mov %%ebx, %1\n"
+      "pop %%ebx\n"
+      : "=a" (a), "=r" (b), "=c" (c), "=d" (d)
+      : "a" (func)
+    );
+ values[0] = a;
+ values[1] = b;
+ values[2] = c;
+ values[3] = d;
+}
+#endif
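
Note: for illustration, a hypothetical caller (not part of the patch): with leaf 0, cpuid returns the vendor string split across ebx/edx/ecx, which is exactly what the VENDOR_INTEL_* constants further down encode as little-endian words of "GenuineIntel".

    /* Hypothetical usage sketch. */
    static void print_vendor(void) {
      int regs[4];
      char vendor[13];
      x86_cpuid(0, regs);
      memcpy(vendor + 0, &regs[1], 4);  /* ebx -> "Genu" */
      memcpy(vendor + 4, &regs[3], 4);  /* edx -> "ineI" */
      memcpy(vendor + 8, &regs[2], 4);  /* ecx -> "ntel" */
      vendor[12] = '\0';
      printf("cpu vendor: %s\n", vendor);
    }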
+
+/* Read the content of a file (such as /proc/cpuinfo) into a user-provided
+ * buffer. Return the length of the data, or -1 on error. Does *not*
+ * zero-terminate the content. Will not read more than 'buffsize' bytes.
+ */
+static int
+read_file(const char* pathname, char* buffer, size_t buffsize)
+{
+ int fd, len;
+
+ fd = open(pathname, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ do {
+ len = read(fd, buffer, buffsize);
+ } while (len < 0 && errno == EINTR);
+
+ close(fd);
+
+ return len;
+}
+
+/* Extract the content of the first occurrence of a given field in
+ * the content of /proc/cpuinfo and return it as a heap-allocated
+ * string that must be freed by the caller.
+ *
+ * Return NULL if not found
+ */
+static char*
+extract_cpuinfo_field(char* buffer, int buflen, const char* field)
+{
+ int fieldlen = strlen(field);
+ char* bufend = buffer + buflen;
+ char* result = NULL;
+    int len;
+ const char *p, *q;
+
+    /* Look for the first field occurrence, and ensure it starts a line.
+ */
+ p = buffer;
+ bufend = buffer + buflen;
+ for (;;) {
+ p = memmem(p, bufend-p, field, fieldlen);
+ if (p == NULL)
+ goto EXIT;
+
+ if (p == buffer || p[-1] == '\n')
+ break;
+
+ p += fieldlen;
+ }
+
+    /* Skip to the first colon followed by a space */
+ p += fieldlen;
+ p = memchr(p, ':', bufend-p);
+ if (p == NULL || p[1] != ' ')
+ goto EXIT;
+
+ /* Find the end of the line */
+ p += 2;
+ q = memchr(p, '\n', bufend-p);
+ if (q == NULL)
+ q = bufend;
+
+ /* Copy the line into a heap-allocated buffer */
+ len = q-p;
+ result = malloc(len+1);
+ if (result == NULL)
+ goto EXIT;
+
+ memcpy(result, p, len);
+ result[len] = '\0';
+
+EXIT:
+ return result;
+}
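
Note: usage sketch with a hypothetical /proc/cpuinfo buffer: given a line such as "CPU architecture: 7", the call below returns a malloc'ed "7" that the caller must free.

    char* arch = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "CPU architecture");
    if (arch != NULL) {
      /* e.g. "7" on a Cortex-A8/A9 class CPU */
      free(arch);
    }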
+
+/* Count the number of occurrences of a given field prefix in /proc/cpuinfo.
+ */
+static int
+count_cpuinfo_field(char* buffer, int buflen, const char* field)
+{
+ int fieldlen = strlen(field);
+ const char* p = buffer;
+ const char* bufend = buffer + buflen;
+ int count = 0;
+
+ for (;;) {
+ const char* q;
+
+ p = memmem(p, bufend-p, field, fieldlen);
+ if (p == NULL)
+ break;
+
+ /* Ensure that the field is at the start of a line */
+ if (p > buffer && p[-1] != '\n') {
+ p += fieldlen;
+ continue;
+ }
+
+ /* skip any whitespace */
+ q = p + fieldlen;
+ while (q < bufend && (*q == ' ' || *q == '\t'))
+ q++;
+
+ /* we must have a colon now */
+ if (q < bufend && *q == ':') {
+ count += 1;
+            q++;
+ }
+ p = q;
+ }
+
+ return count;
+}
+
+/* Like strlen(), but for constant string literals */
+#define STRLEN_CONST(x)  (sizeof(x)-1)
+
+
+/* Checks that a space-separated list of items contains one given 'item'.
+ * Returns 1 if found, 0 otherwise.
+ */
+static int
+has_list_item(const char* list, const char* item)
+{
+ const char* p = list;
+ int itemlen = strlen(item);
+
+ if (list == NULL)
+ return 0;
+
+ while (*p) {
+ const char* q;
+
+ /* skip spaces */
+ while (*p == ' ' || *p == '\t')
+ p++;
+
+ /* find end of current list item */
+ q = p;
+ while (*q && *q != ' ' && *q != '\t')
+ q++;
+
+ if (itemlen == q-p && !memcmp(p, item, itemlen))
+ return 1;
+
+ /* skip to next item */
+ p = q;
+ }
+ return 0;
+}
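
Note: usage sketch, with a hypothetical ARM 'Features' value:

    const char* features = "swp half thumb fastmult vfp edsp neon vfpv3";
    const int has_neon  = has_list_item(features, "neon");   /* -> 1 */
    const int has_vfpv4 = has_list_item(features, "vfpv4");  /* -> 0 */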
+
+
+static void
+android_cpuInit(void)
+{
+ char cpuinfo[4096];
+ int cpuinfo_len;
+
+ g_cpuFamily = DEFAULT_CPU_FAMILY;
+ g_cpuFeatures = 0;
+ g_cpuCount = 1;
+
+ cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, sizeof cpuinfo);
+ D("cpuinfo_len is (%d):\n%.*s\n", cpuinfo_len,
+ cpuinfo_len >= 0 ? cpuinfo_len : 0, cpuinfo);
+
+ if (cpuinfo_len < 0) /* should not happen */ {
+ return;
+ }
+
+    /* Count the CPU cores; the value may be 0 on single-core CPUs. */
+ g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "processor");
+ if (g_cpuCount == 0) {
+ g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "Processor");
+ if (g_cpuCount == 0) {
+ g_cpuCount = 1;
+ }
+ }
+
+ D("found cpuCount = %d\n", g_cpuCount);
+
+#ifdef __ARM_ARCH__
+ {
+ char* features = NULL;
+ char* architecture = NULL;
+
+        /* Extract architecture from the "CPU architecture" field.
+         * The list is well-known, unlike the output of the
+         * 'Processor' field, which can vary greatly.
+         *
+         * See the definition of the 'proc_arch' array in
+         * $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function
+         * in the same file.
+ */
+ char* cpuArch = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "CPU architecture");
+
+ if (cpuArch != NULL) {
+ char* end;
+ long archNumber;
+ int hasARMv7 = 0;
+
+ D("found cpuArch = '%s'\n", cpuArch);
+
+ /* read the initial decimal number, ignore the rest */
+ archNumber = strtol(cpuArch, &end, 10);
+
+ /* Here we assume that ARMv8 will be upwards compatible with v7
+ * in the future. Unfortunately, there is no 'Features' field to
+ * indicate that Thumb-2 is supported.
+ */
+ if (end > cpuArch && archNumber >= 7) {
+ hasARMv7 = 1;
+ }
+
+ /* Unfortunately, it seems that certain ARMv6-based CPUs
+ * report an incorrect architecture number of 7!
+ *
+ * See http://code.google.com/p/android/issues/detail?id=10812
+ *
+             * We try to correct this by looking at the 'elf_format'
+             * token in the 'Processor' field, which is of the form
+             * "(v7l)" for an ARMv7-based CPU and "(v6l)" for an
+             * ARMv6 one.
+ */
+ if (hasARMv7) {
+ char* cpuProc = extract_cpuinfo_field(cpuinfo, cpuinfo_len,
+ "Processor");
+ if (cpuProc != NULL) {
+ D("found cpuProc = '%s'\n", cpuProc);
+ if (has_list_item(cpuProc, "(v6l)")) {
+ D("CPU processor and architecture mismatch!!\n");
+ hasARMv7 = 0;
+ }
+ free(cpuProc);
+ }
+ }
+
+ if (hasARMv7) {
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_ARMv7;
+ }
+
+ /* The LDREX / STREX instructions are available from ARMv6 */
+ if (archNumber >= 6) {
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_LDREX_STREX;
+ }
+
+ free(cpuArch);
+ }
+
+ /* Extract the list of CPU features from 'Features' field */
+ char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features");
+
+ if (cpuFeatures != NULL) {
+
+ D("found cpuFeatures = '%s'\n", cpuFeatures);
+
+ if (has_list_item(cpuFeatures, "vfpv3"))
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
+
+ else if (has_list_item(cpuFeatures, "vfpv3d16"))
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
+
+ if (has_list_item(cpuFeatures, "neon")) {
+      /* Note: certain kernels only report neon, not vfpv3,
+       * in their features list. However, ARM mandates that
+       * if NEON is implemented, VFPv3 must be implemented
+       * too, so always set both flags.
+       */
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_NEON |
+ ANDROID_CPU_ARM_FEATURE_VFPv3;
+ }
+ free(cpuFeatures);
+ }
+ }
+#endif /* __ARM_ARCH__ */
+
+#ifdef __i386__
+ g_cpuFamily = ANDROID_CPU_FAMILY_X86;
+
+ int regs[4];
+
+/* According to http://en.wikipedia.org/wiki/CPUID */
+#define VENDOR_INTEL_b 0x756e6547
+#define VENDOR_INTEL_c 0x6c65746e
+#define VENDOR_INTEL_d 0x49656e69
+
+ x86_cpuid(0, regs);
+ int vendorIsIntel = (regs[1] == VENDOR_INTEL_b &&
+ regs[2] == VENDOR_INTEL_c &&
+ regs[3] == VENDOR_INTEL_d);
+
+ x86_cpuid(1, regs);
+ if ((regs[2] & (1 << 9)) != 0) {
+ g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_SSSE3;
+ }
+ if ((regs[2] & (1 << 23)) != 0) {
+ g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_POPCNT;
+ }
+ if (vendorIsIntel && (regs[2] & (1 << 22)) != 0) {
+ g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_MOVBE;
+ }
+#endif
+}
+
+
+AndroidCpuFamily
+android_getCpuFamily(void)
+{
+ pthread_once(&g_once, android_cpuInit);
+ return g_cpuFamily;
+}
+
+
+uint64_t
+android_getCpuFeatures(void)
+{
+ pthread_once(&g_once, android_cpuInit);
+ return g_cpuFeatures;
+}
+
+
+int
+android_getCpuCount(void)
+{
+ pthread_once(&g_once, android_cpuInit);
+ return g_cpuCount;
+}
diff --git a/src/dsp/cpu-features.h b/src/dsp/cpu-features.h
new file mode 100644
index 00000000..f20c0bc4
--- /dev/null
+++ b/src/dsp/cpu-features.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// You can download Android source at
+// http://source.android.com/source/downloading.html
+// Original files are in ndk/sources/android/cpufeatures
+// Revision is Change-Id: I9a0629efba36a6023f05e5f092e7addcc1b7d2a9
+
+#ifndef CPU_FEATURES_H
+#define CPU_FEATURES_H
+
+#include <sys/cdefs.h>
+#include <stdint.h>
+
+__BEGIN_DECLS
+
+typedef enum {
+ ANDROID_CPU_FAMILY_UNKNOWN = 0,
+ ANDROID_CPU_FAMILY_ARM,
+ ANDROID_CPU_FAMILY_X86,
+
+ ANDROID_CPU_FAMILY_MAX /* do not remove */
+
+} AndroidCpuFamily;
+
+/* Return family of the device's CPU */
+extern AndroidCpuFamily android_getCpuFamily(void);
+
+enum {
+ ANDROID_CPU_ARM_FEATURE_ARMv7 = (1 << 0),
+ ANDROID_CPU_ARM_FEATURE_VFPv3 = (1 << 1),
+ ANDROID_CPU_ARM_FEATURE_NEON = (1 << 2),
+ ANDROID_CPU_ARM_FEATURE_LDREX_STREX = (1 << 3),
+};
+
+enum {
+ ANDROID_CPU_X86_FEATURE_SSSE3 = (1 << 0),
+ ANDROID_CPU_X86_FEATURE_POPCNT = (1 << 1),
+ ANDROID_CPU_X86_FEATURE_MOVBE = (1 << 2),
+};
+
+extern uint64_t android_getCpuFeatures(void);
+
+/* Return the number of CPU cores detected on this device. */
+extern int android_getCpuCount(void);
+
+__END_DECLS
+
+#endif /* CPU_FEATURES_H */
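
Note: taken together, the new API is consumed as in this sketch, mirroring the AndroidCPUInfo() hook added to cpu.c below:

    #include "cpu-features.h"

    /* Sketch: gate a NEON code path at runtime on Android. */
    static int CanUseNEON(void) {
      return android_getCpuFamily() == ANDROID_CPU_FAMILY_ARM &&
             (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
    }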
diff --git a/src/dsp/cpu.c b/src/dsp/cpu.c
index 2ee7812d..bf9ae0c7 100644
--- a/src/dsp/cpu.c
+++ b/src/dsp/cpu.c
@@ -11,9 +11,9 @@
#include "./dsp.h"
-//#if defined(__ANDROID__)
-//#include <cpu-features.h>
-//#endif
+#if defined(__ANDROID__)
+#include "./cpu-features.h"
+#endif
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
@@ -57,17 +57,17 @@ static int x86CPUInfo(CPUFeature feature) {
return 0;
}
VP8CPUInfo VP8GetCPUInfo = x86CPUInfo;
-//#elif defined(WEBP_ANDROID_NEON)
-//static int AndroidCPUInfo(CPUFeature feature) {
-// const AndroidCpuFamily cpu_family = android_getCpuFamily();
-// const uint64_t cpu_features = android_getCpuFeatures();
-// if (feature == kNEON) {
-// return (cpu_family == ANDROID_CPU_FAMILY_ARM &&
-// 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON));
-// }
-// return 0;
-//}
-//VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo;
+#elif defined(WEBP_ANDROID_NEON)
+static int AndroidCPUInfo(CPUFeature feature) {
+ const AndroidCpuFamily cpu_family = android_getCpuFamily();
+ const uint64_t cpu_features = android_getCpuFeatures();
+ if (feature == kNEON) {
+ return (cpu_family == ANDROID_CPU_FAMILY_ARM &&
+ 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON));
+ }
+ return 0;
+}
+VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo;
#elif defined(__ARM_NEON__)
// define a dummy function to enable turning off NEON at runtime by setting
// VP8DecGetCPUInfo = NULL
diff --git a/src/dsp/dec.c b/src/dsp/dec.c
index 9ae7b6fa..758c6a57 100644
--- a/src/dsp/dec.c
+++ b/src/dsp/dec.c
@@ -426,11 +426,16 @@ static void HE8uv(uint8_t *dst) { // horizontal
}
// helper for chroma-DC predictions
-static WEBP_INLINE void Put8x8uv(uint64_t v, uint8_t* dst) {
+static WEBP_INLINE void Put8x8uv(uint8_t value, uint8_t* dst) {
int j;
+#ifndef WEBP_REFERENCE_IMPLEMENTATION
+ const uint64_t v = (uint64_t)value * 0x0101010101010101ULL;
for (j = 0; j < 8; ++j) {
*(uint64_t*)(dst + j * BPS) = v;
}
+#else
+ for (j = 0; j < 8; ++j) memset(dst + j * BPS, value, 8);
+#endif
}
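
Note: the 0x0101010101010101ULL multiplier replicates one byte across all eight bytes of a 64-bit word, so each row is painted with a single store; a sketch of the equivalence:

    /* value = 0xAB gives v = 0xABABABABABABABAB, i.e. the same bytes that
     * memset(dst, value, 8) would write, but via one 64-bit store per row. */
    const uint64_t v = (uint64_t)0xAB * 0x0101010101010101ULL;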
static void DC8uv(uint8_t *dst) { // DC
@@ -439,7 +444,7 @@ static void DC8uv(uint8_t *dst) { // DC
for (i = 0; i < 8; ++i) {
dc0 += dst[i - BPS] + dst[-1 + i * BPS];
}
- Put8x8uv((uint64_t)((dc0 >> 4) * 0x0101010101010101ULL), dst);
+ Put8x8uv(dc0 >> 4, dst);
}
static void DC8uvNoLeft(uint8_t *dst) { // DC with no left samples
@@ -448,7 +453,7 @@ static void DC8uvNoLeft(uint8_t *dst) { // DC with no left samples
for (i = 0; i < 8; ++i) {
dc0 += dst[i - BPS];
}
- Put8x8uv((uint64_t)((dc0 >> 3) * 0x0101010101010101ULL), dst);
+ Put8x8uv(dc0 >> 3, dst);
}
static void DC8uvNoTop(uint8_t *dst) { // DC with no top samples
@@ -457,11 +462,11 @@ static void DC8uvNoTop(uint8_t *dst) { // DC with no top samples
for (i = 0; i < 8; ++i) {
dc0 += dst[-1 + i * BPS];
}
- Put8x8uv((uint64_t)((dc0 >> 3) * 0x0101010101010101ULL), dst);
+ Put8x8uv(dc0 >> 3, dst);
}
static void DC8uvNoTopLeft(uint8_t *dst) { // DC with nothing
- Put8x8uv(0x8080808080808080ULL, dst);
+ Put8x8uv(0x80, dst);
}
//------------------------------------------------------------------------------
diff --git a/src/dsp/dec_neon.c b/src/dsp/dec_neon.c
index ec824b79..5d7cff15 100644
--- a/src/dsp/dec_neon.c
+++ b/src/dsp/dec_neon.c
@@ -12,14 +12,14 @@
#include "./dsp.h"
-#if defined(WEBP_USE_NEON)
-
-#include "../dec/vp8i.h"
-
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
+#if defined(WEBP_USE_NEON)
+
+#include "../dec/vp8i.h"
+
#define QRegs "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
@@ -155,6 +155,9 @@ static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
}
}
+//-----------------------------------------------------------------------------
+// Inverse transforms (Paragraph 14.4)
+
static void TransformOneNEON(const int16_t *in, uint8_t *dst) {
const int kBPS = BPS;
const int16_t constants[] = {20091, 17734, 0, 0};
@@ -311,19 +314,92 @@ static void TransformTwoNEON(const int16_t* in, uint8_t* dst, int do_two) {
}
}
+static void TransformWHT(const int16_t* in, int16_t* out) {
+  const int kStep = 32;  // post-increment stride of the vst1.16 stores:
+                         // 32 bytes = 16 int16_t, i.e. out[0], out[16], ...
+ __asm__ volatile (
+ // part 1
+ // load data into q0, q1
+ "vld1.16 {q0, q1}, [%[in]] \n"
+
+ "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
+ "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
+ "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
+ "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
+
+ "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
+ "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
+ "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
+ "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
+
+ // Transpose
+ // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
+ // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
+ "vswp d1, d4 \n" // vtrn.64 q0, q2
+ "vswp d3, d6 \n" // vtrn.64 q1, q3
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q2, q3 \n"
+
+ "vmov.s32 q4, #3 \n" // dc = 3
+ "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
+ "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
+ "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
+ "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
+ "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
+
+ "vadd.s32 q0, q6, q7 \n"
+ "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
+ "vadd.s32 q1, q9, q8 \n"
+ "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
+ "vsub.s32 q2, q6, q7 \n"
+ "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
+ "vsub.s32 q3, q9, q8 \n"
+ "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
+
+ // set the results to output
+ "vst1.16 d0[0], [%[out]], %[kStep] \n"
+ "vst1.16 d1[0], [%[out]], %[kStep] \n"
+ "vst1.16 d2[0], [%[out]], %[kStep] \n"
+ "vst1.16 d3[0], [%[out]], %[kStep] \n"
+ "vst1.16 d0[1], [%[out]], %[kStep] \n"
+ "vst1.16 d1[1], [%[out]], %[kStep] \n"
+ "vst1.16 d2[1], [%[out]], %[kStep] \n"
+ "vst1.16 d3[1], [%[out]], %[kStep] \n"
+ "vst1.16 d0[2], [%[out]], %[kStep] \n"
+ "vst1.16 d1[2], [%[out]], %[kStep] \n"
+ "vst1.16 d2[2], [%[out]], %[kStep] \n"
+ "vst1.16 d3[2], [%[out]], %[kStep] \n"
+ "vst1.16 d0[3], [%[out]], %[kStep] \n"
+ "vst1.16 d1[3], [%[out]], %[kStep] \n"
+ "vst1.16 d2[3], [%[out]], %[kStep] \n"
+ "vst1.16 d3[3], [%[out]], %[kStep] \n"
+
+ : [out] "+r"(out) // modified registers
+ : [in] "r"(in), [kStep] "r"(kStep) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4",
+ "q5", "q6", "q7", "q8", "q9" // clobbered
+ );
+}
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+// Entry point
+
extern void VP8DspInitNEON(void);
void VP8DspInitNEON(void) {
+#if defined(WEBP_USE_NEON)
VP8Transform = TransformTwoNEON;
+ VP8TransformWHT = TransformWHT;
VP8SimpleVFilter16 = SimpleVFilter16NEON;
VP8SimpleHFilter16 = SimpleHFilter16NEON;
VP8SimpleVFilter16i = SimpleVFilter16iNEON;
VP8SimpleHFilter16i = SimpleHFilter16iNEON;
+#endif // WEBP_USE_NEON
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-
-#endif // WEBP_USE_NEON
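
Note the restructuring pattern used here and in the other SIMD files: the WEBP_USE_NEON guard now wraps the implementations and the body of the init function, but not the init function itself, so the symbol is defined (possibly as a no-op) in every build. Sketch of the new per-file shape, details elided:

    #if defined(WEBP_USE_NEON)
    /* ... NEON implementations ... */
    #endif

    void VP8DspInitNEON(void) {
    #if defined(WEBP_USE_NEON)
      /* ... install function pointers ... */
    #endif  /* otherwise a no-op, but the symbol still links */
    }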
diff --git a/src/dsp/dec_sse2.c b/src/dsp/dec_sse2.c
index 472b68ec..1cac1b84 100644
--- a/src/dsp/dec_sse2.c
+++ b/src/dsp/dec_sse2.c
@@ -12,15 +12,15 @@
#include "./dsp.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#include "../dec/vp8i.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -194,7 +194,7 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Add inverse transform to 'dst' and store.
{
- const __m128i zero = _mm_set1_epi16(0);
+ const __m128i zero = _mm_setzero_si128();
// Load the reference(s).
__m128i dst0, dst1, dst2, dst3;
if (do_two) {
@@ -278,14 +278,14 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) { \
const __m128i zero = _mm_setzero_si128(); \
- const __m128i t1 = MM_ABS(p1, p0); \
- const __m128i t2 = MM_ABS(q1, q0); \
+ const __m128i t_1 = MM_ABS(p1, p0); \
+ const __m128i t_2 = MM_ABS(q1, q0); \
\
const __m128i h = _mm_set1_epi8(hev_thresh); \
- const __m128i t3 = _mm_subs_epu8(t1, h); /* abs(p1 - p0) - hev_tresh */ \
- const __m128i t4 = _mm_subs_epu8(t2, h); /* abs(q1 - q0) - hev_tresh */ \
+ const __m128i t_3 = _mm_subs_epu8(t_1, h); /* abs(p1 - p0) - hev_tresh */ \
+ const __m128i t_4 = _mm_subs_epu8(t_2, h); /* abs(q1 - q0) - hev_tresh */ \
\
- not_hev = _mm_or_si128(t3, t4); \
+ not_hev = _mm_or_si128(t_3, t_4); \
not_hev = _mm_cmpeq_epi8(not_hev, zero); /* not_hev <= t1 && not_hev <= t2 */\
}
@@ -314,13 +314,13 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
-// q = q - a and p = p + a; where a = [(a_hi >> 7), (a_lo >> 7)]
+// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) { \
const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7); \
const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7); \
- const __m128i a = _mm_packs_epi16(a_lo7, a_hi7); \
- pi = _mm_adds_epi8(pi, a); \
- qi = _mm_subs_epi8(qi, a); \
+ const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7); \
+ pi = _mm_adds_epi8(pi, delta); \
+ qi = _mm_subs_epi8(qi, delta); \
}
static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
@@ -876,9 +876,15 @@ static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+// Entry point
+
extern void VP8DspInitSSE2(void);
void VP8DspInitSSE2(void) {
+#if defined(WEBP_USE_SSE2)
VP8Transform = TransformSSE2;
VP8VFilter16 = VFilter16SSE2;
@@ -894,10 +900,9 @@ void VP8DspInitSSE2(void) {
VP8SimpleHFilter16 = SimpleHFilter16SSE2;
VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
+#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-
-#endif // WEBP_USE_SSE2
diff --git a/src/dsp/dsp.h b/src/dsp/dsp.h
index 3aad3095..fdf53e7f 100644
--- a/src/dsp/dsp.h
+++ b/src/dsp/dsp.h
@@ -29,11 +29,11 @@ extern "C" {
#define WEBP_USE_SSE2
#endif
-//#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
-//#define WEBP_ANDROID_NEON // Android targets that might support NEON
-//#endif
+#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
+#define WEBP_ANDROID_NEON // Android targets that might support NEON
+#endif
-#if defined(__ARM_NEON__) || defined(WEBP_ANDROID_NEON)
+#if defined(__ARM_NEON__)
#define WEBP_USE_NEON
#endif
@@ -49,8 +49,6 @@ extern VP8CPUInfo VP8GetCPUInfo;
//------------------------------------------------------------------------------
// Encoding
-int VP8GetAlpha(const int histo[]);
-
// Transforms
// VP8Idct: Does one of two inverse transforms. If do_two is set, the transforms
// will be done for (ref, in, dst) and (ref + 4, in + 16, dst + 4).
@@ -85,10 +83,11 @@ typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16],
int n, const struct VP8Matrix* const mtx);
extern VP8QuantizeBlock VP8EncQuantizeBlock;
-// Compute susceptibility based on DCT-coeff histograms:
-// the higher, the "easier" the macroblock is to compress.
-typedef int (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block);
+// Collect histogram for susceptibility calculation and accumulate in histo[].
+struct VP8Histogram;
+typedef void (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ struct VP8Histogram* const histo);
extern const int VP8DspScan[16 + 4 + 4];
extern VP8CHisto VP8CollectHistogram;
@@ -104,7 +103,7 @@ extern VP8DecIdct2 VP8Transform;
extern VP8DecIdct VP8TransformUV;
extern VP8DecIdct VP8TransformDC;
extern VP8DecIdct VP8TransformDCUV;
-extern void (*VP8TransformWHT)(const int16_t* in, int16_t* out);
+extern VP8WHT VP8TransformWHT;
// *dst is the destination block, with stride BPS. Boundary samples are
// assumed accessible when needed.
@@ -159,6 +158,9 @@ extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
// Initializes SSE2 version of the fancy upsamplers.
void WebPInitUpsamplersSSE2(void);
+// NEON version
+void WebPInitUpsamplersNEON(void);
+
#endif // FANCY_UPSAMPLING
// Point-sampling methods.
@@ -200,6 +202,7 @@ extern void (*WebPApplyAlphaMultiply4444)(
void WebPInitPremultiply(void);
void WebPInitPremultiplySSE2(void); // should not be called directly.
+void WebPInitPremultiplyNEON(void);
//------------------------------------------------------------------------------
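
Note: net effect of the macro change, assuming the usual Android setup where only the *_neon.c sources are compiled with NEON-enabled flags such as -mfpu=neon:

    /* Assumed typical ARMv7 Android build:
     *  - generic sources (no -mfpu=neon): __ARM_NEON__ is undefined
     *      -> WEBP_ANDROID_NEON defined, WEBP_USE_NEON undefined
     *      -> cpu.c selects AndroidCPUInfo for runtime NEON detection.
     *  - *_neon.c sources (with -mfpu=neon): __ARM_NEON__ is defined
     *      -> WEBP_USE_NEON defined, so the NEON bodies are compiled in. */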
diff --git a/src/dsp/enc.c b/src/dsp/enc.c
index 02234564..ae2c830a 100644
--- a/src/dsp/enc.c
+++ b/src/dsp/enc.c
@@ -17,31 +17,18 @@
extern "C" {
#endif
-//------------------------------------------------------------------------------
-// Compute susceptibility based on DCT-coeff histograms:
-// the higher, the "easier" the macroblock is to compress.
-
-static int ClipAlpha(int alpha) {
- return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
+static WEBP_INLINE uint8_t clip_8b(int v) {
+ return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
}
-int VP8GetAlpha(const int histo[MAX_COEFF_THRESH + 1]) {
- int num = 0, den = 0, val = 0;
- int k;
- int alpha;
- // note: changing this loop to avoid the numerous "k + 1" slows things down.
- for (k = 0; k < MAX_COEFF_THRESH; ++k) {
- if (histo[k + 1]) {
- val += histo[k + 1];
- num += val * (k + 1);
- den += (k + 1) * (k + 1);
- }
- }
- // we scale the value to a usable [0..255] range
- alpha = den ? 10 * num / den - 5 : 0;
- return ClipAlpha(alpha);
+static WEBP_INLINE int clip_max(int v, int max) {
+ return (v > max) ? max : v;
}
+//------------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+
const int VP8DspScan[16 + 4 + 4] = {
// Luma
0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
@@ -53,27 +40,23 @@ const int VP8DspScan[16 + 4 + 4] = {
8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V
};
-static int CollectHistogram(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block) {
- int histo[MAX_COEFF_THRESH + 1] = { 0 };
- int16_t out[16];
- int j, k;
+static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ VP8Histogram* const histo) {
+ int j;
for (j = start_block; j < end_block; ++j) {
- VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
+ int k;
+ int16_t out[16];
- // Convert coefficients to bin (within out[]).
- for (k = 0; k < 16; ++k) {
- const int v = abs(out[k]) >> 2;
- out[k] = (v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v;
- }
+ VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
- // Use bin to update histogram.
+    // Convert coefficients to bin and update the histogram.
for (k = 0; k < 16; ++k) {
- histo[out[k]]++;
+ const int v = abs(out[k]) >> 3; // TODO(skal): add rounding?
+ const int clipped_value = clip_max(v, MAX_COEFF_THRESH);
+ histo->distribution[clipped_value]++;
}
}
-
- return VP8GetAlpha(histo);
}
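
Note: CollectHistogram now only fills the caller's VP8Histogram; the alpha computation (formerly VP8GetAlpha() in this file) moves out of the dsp layer. The struct shape implied by the histo->distribution[...] accesses, as a sketch (the real definition lives in the encoder headers):

    typedef struct VP8Histogram {
      int distribution[MAX_COEFF_THRESH + 1];  /* counts per coefficient bin */
    } VP8Histogram;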
//------------------------------------------------------------------------------
@@ -89,15 +72,12 @@ static void InitTables(void) {
if (!tables_ok) {
int i;
for (i = -255; i <= 255 + 255; ++i) {
- clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i;
+ clip1[255 + i] = clip_8b(i);
}
tables_ok = 1;
}
}
-static WEBP_INLINE uint8_t clip_8b(int v) {
- return (!(v & ~0xff)) ? v : v < 0 ? 0 : 255;
-}
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -154,25 +134,25 @@ static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) {
int i;
int tmp[16];
for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {
- const int d0 = src[0] - ref[0];
+ const int d0 = src[0] - ref[0]; // 9bit dynamic range ([-255,255])
const int d1 = src[1] - ref[1];
const int d2 = src[2] - ref[2];
const int d3 = src[3] - ref[3];
- const int a0 = (d0 + d3) << 3;
- const int a1 = (d1 + d2) << 3;
- const int a2 = (d1 - d2) << 3;
- const int a3 = (d0 - d3) << 3;
- tmp[0 + i * 4] = (a0 + a1);
- tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 14500) >> 12;
- tmp[2 + i * 4] = (a0 - a1);
- tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 7500) >> 12;
+ const int a0 = (d0 + d3); // 10b [-510,510]
+ const int a1 = (d1 + d2);
+ const int a2 = (d1 - d2);
+ const int a3 = (d0 - d3);
+ tmp[0 + i * 4] = (a0 + a1) << 3; // 14b [-8160,8160]
+ tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9; // [-7536,7542]
+ tmp[2 + i * 4] = (a0 - a1) << 3;
+ tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9;
}
for (i = 0; i < 4; ++i) {
- const int a0 = (tmp[0 + i] + tmp[12 + i]);
+ const int a0 = (tmp[0 + i] + tmp[12 + i]); // 15b
const int a1 = (tmp[4 + i] + tmp[ 8 + i]);
const int a2 = (tmp[4 + i] - tmp[ 8 + i]);
const int a3 = (tmp[0 + i] - tmp[12 + i]);
- out[0 + i] = (a0 + a1 + 7) >> 4;
+ out[0 + i] = (a0 + a1 + 7) >> 4; // 12b
out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
out[8 + i] = (a0 - a1 + 7) >> 4;
out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);
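
Note: the new rounding constants are the old ones rescaled. The <<3 moved from the inputs of the horizontal pass to its outputs, so the 2217/5352 products shrink by a factor of 8 and the bias and shift shrink to match:

    /* Old: a = d << 3;  t = (a2*2217 + a3*5352 + 14500) >> 12
     * New: a = d;       t = (a2*2217 + a3*5352 +  1812) >>  9
     * Dropping the <<3 divides the products by 8, so:
     *   14500 / 8 = 1812.5 -> 1812,   7500 / 8 = 937.5 -> 937,
     * and the shift drops from 12 to 12 - 3 = 9. */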
@@ -589,30 +569,30 @@ static int TTransform(const uint8_t* in, const uint16_t* w) {
int i;
// horizontal pass
for (i = 0; i < 4; ++i, in += BPS) {
- const int a0 = (in[0] + in[2]) << 2;
- const int a1 = (in[1] + in[3]) << 2;
- const int a2 = (in[1] - in[3]) << 2;
- const int a3 = (in[0] - in[2]) << 2;
- tmp[0 + i * 4] = a0 + a1 + (a0 != 0);
+ const int a0 = in[0] + in[2];
+ const int a1 = in[1] + in[3];
+ const int a2 = in[1] - in[3];
+ const int a3 = in[0] - in[2];
+ tmp[0 + i * 4] = a0 + a1;
tmp[1 + i * 4] = a3 + a2;
tmp[2 + i * 4] = a3 - a2;
tmp[3 + i * 4] = a0 - a1;
}
// vertical pass
for (i = 0; i < 4; ++i, ++w) {
- const int a0 = (tmp[0 + i] + tmp[8 + i]);
- const int a1 = (tmp[4 + i] + tmp[12+ i]);
- const int a2 = (tmp[4 + i] - tmp[12+ i]);
- const int a3 = (tmp[0 + i] - tmp[8 + i]);
+ const int a0 = tmp[0 + i] + tmp[8 + i];
+ const int a1 = tmp[4 + i] + tmp[12+ i];
+ const int a2 = tmp[4 + i] - tmp[12+ i];
+ const int a3 = tmp[0 + i] - tmp[8 + i];
const int b0 = a0 + a1;
const int b1 = a3 + a2;
const int b2 = a3 - a2;
const int b3 = a0 - a1;
- // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
- sum += w[ 0] * ((abs(b0) + 3) >> 3);
- sum += w[ 4] * ((abs(b1) + 3) >> 3);
- sum += w[ 8] * ((abs(b2) + 3) >> 3);
- sum += w[12] * ((abs(b3) + 3) >> 3);
+
+ sum += w[ 0] * abs(b0);
+ sum += w[ 4] * abs(b1);
+ sum += w[ 8] * abs(b2);
+ sum += w[12] * abs(b3);
}
return sum;
}
@@ -621,7 +601,7 @@ static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int sum1 = TTransform(a, w);
const int sum2 = TTransform(b, w);
- return (abs(sum2 - sum1) + 8) >> 4;
+ return abs(sum2 - sum1) >> 5;
}
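
Note: TTransform drops the <<2 pre-scale and the per-term (abs(b)+3)>>3 rounding, and Disto4x4 compensates by shifting the final difference by 5 instead of rounding-and-shifting by 4; the scales match up to rounding:

    /* Old per term: w * ((|4*b| + 3) >> 3)  ~=  (w * |b|) >> 1
     * Old final:    (|sum2 - sum1| + 8) >> 4
     * Combined:     ~ |sum2 - sum1| >> 5, which is the new code with the
     * per-term rounding moved out of the inner loop. */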
static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
@@ -706,6 +686,7 @@ VP8QuantizeBlock VP8EncQuantizeBlock;
VP8BlockCopy VP8Copy4x4;
extern void VP8EncDspInitSSE2(void);
+extern void VP8EncDspInitNEON(void);
void VP8EncDspInit(void) {
InitTables();
@@ -734,6 +715,10 @@ void VP8EncDspInit(void) {
if (VP8GetCPUInfo(kSSE2)) {
VP8EncDspInitSSE2();
}
+#elif defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ VP8EncDspInitNEON();
+ }
#endif
}
}
diff --git a/src/dsp/enc_neon.c b/src/dsp/enc_neon.c
new file mode 100644
index 00000000..b5a1fbaf
--- /dev/null
+++ b/src/dsp/enc_neon.c
@@ -0,0 +1,661 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// ARM NEON version of speed-critical encoding functions.
+//
+// adapted from libvpx (http://www.webmproject.org/code/)
+
+#include "./dsp.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#if defined(WEBP_USE_NEON)
+
+#include "../enc/vp8enci.h"
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+// Inverse transform.
+// This code is pretty much the same as TransformOneNEON in the decoder, except
+// for subtraction to *ref. See the comments there for algorithmic explanations.
+static void ITransformOne(const uint8_t* ref,
+ const int16_t* in, uint8_t* dst) {
+ const int kBPS = BPS;
+  const int16_t kC1C2[] = { 20091, 17734, 0, 0 };  // kC1, kC2 >> 1, 0, 0
+
+ __asm__ volatile (
+ "vld1.16 {q1, q2}, [%[in]] \n"
+ "vld1.16 {d0}, [%[kC1C2]] \n"
+
+ // d2: in[0]
+ // d3: in[8]
+ // d4: in[4]
+ // d5: in[12]
+ "vswp d3, d4 \n"
+
+ // q8 = {in[4], in[12]} * kC1 * 2 >> 16
+ // q9 = {in[4], in[12]} * kC2 >> 16
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ // d22 = a = in[0] + in[8]
+ // d23 = b = in[0] - in[8]
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ // q8 = in[4]/[12] * kC1 >> 16
+ "vshr.s16 q8, q8, #1 \n"
+
+ // Add {in[4], in[12]} back after the multiplication.
+ "vqadd.s16 q8, q2, q8 \n"
+
+ // d20 = c = in[4]*kC2 - in[12]*kC1
+ // d21 = d = in[4]*kC1 + in[12]*kC2
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ // d2 = tmp[0] = a + d
+ // d3 = tmp[1] = b + c
+ // d4 = tmp[2] = b - c
+ // d5 = tmp[3] = a - d
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ "vswp d3, d4 \n"
+
+ // q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
+ // q9 = {tmp[4], tmp[12]} * kC2 >> 16
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ // d22 = a = tmp[0] + tmp[8]
+ // d23 = b = tmp[0] - tmp[8]
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ "vshr.s16 q8, q8, #1 \n"
+ "vqadd.s16 q8, q2, q8 \n"
+
+ // d20 = c = in[4]*kC2 - in[12]*kC1
+ // d21 = d = in[4]*kC1 + in[12]*kC2
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ // d2 = tmp[0] = a + d
+ // d3 = tmp[1] = b + c
+ // d4 = tmp[2] = b - c
+ // d5 = tmp[3] = a - d
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vld1.32 d6[0], [%[ref]], %[kBPS] \n"
+ "vld1.32 d6[1], [%[ref]], %[kBPS] \n"
+ "vld1.32 d7[0], [%[ref]], %[kBPS] \n"
+ "vld1.32 d7[1], [%[ref]], %[kBPS] \n"
+
+ "sub %[ref], %[ref], %[kBPS], lsl #2 \n"
+
+ // (val) + 4 >> 3
+ "vrshr.s16 d2, d2, #3 \n"
+ "vrshr.s16 d3, d3, #3 \n"
+ "vrshr.s16 d4, d4, #3 \n"
+ "vrshr.s16 d5, d5, #3 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ // Must accumulate before saturating
+ "vmovl.u8 q8, d6 \n"
+ "vmovl.u8 q9, d7 \n"
+
+ "vqadd.s16 q1, q1, q8 \n"
+ "vqadd.s16 q2, q2, q9 \n"
+
+ "vqmovun.s16 d0, q1 \n"
+ "vqmovun.s16 d1, q2 \n"
+
+ "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[1], [%[dst]] \n"
+
+ : [in] "+r"(in), [dst] "+r"(dst) // modified registers
+ : [kBPS] "r"(kBPS), [kC1C2] "r"(kC1C2), [ref] "r"(ref) // constants
+ : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" // clobbered
+ );
+}
+
+static void ITransform(const uint8_t* ref,
+ const int16_t* in, uint8_t* dst, int do_two) {
+ ITransformOne(ref, in, dst);
+ if (do_two) {
+ ITransformOne(ref + 4, in + 16, dst + 4);
+ }
+}
+
+// Same code as TransformWHT in dec_neon.c
+static void ITransformWHT(const int16_t* in, int16_t* out) {
+  const int kStep = 32;  // post-increment stride of the vst1.16 stores:
+                         // 32 bytes = 16 int16_t, i.e. out[0], out[16], ...
+ __asm__ volatile (
+ // part 1
+ // load data into q0, q1
+ "vld1.16 {q0, q1}, [%[in]] \n"
+
+ "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
+ "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
+ "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
+ "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
+
+ "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
+ "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
+ "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
+ "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
+
+ // Transpose
+ // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
+ // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
+ "vswp d1, d4 \n" // vtrn.64 q0, q2
+ "vswp d3, d6 \n" // vtrn.64 q1, q3
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q2, q3 \n"
+
+ "vmov.s32 q4, #3 \n" // dc = 3
+ "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
+ "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
+ "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
+ "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
+ "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
+
+ "vadd.s32 q0, q6, q7 \n"
+ "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
+ "vadd.s32 q1, q9, q8 \n"
+ "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
+ "vsub.s32 q2, q6, q7 \n"
+ "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
+ "vsub.s32 q3, q9, q8 \n"
+ "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
+
+ // set the results to output
+ "vst1.16 d0[0], [%[out]], %[kStep] \n"
+ "vst1.16 d1[0], [%[out]], %[kStep] \n"
+ "vst1.16 d2[0], [%[out]], %[kStep] \n"
+ "vst1.16 d3[0], [%[out]], %[kStep] \n"
+ "vst1.16 d0[1], [%[out]], %[kStep] \n"
+ "vst1.16 d1[1], [%[out]], %[kStep] \n"
+ "vst1.16 d2[1], [%[out]], %[kStep] \n"
+ "vst1.16 d3[1], [%[out]], %[kStep] \n"
+ "vst1.16 d0[2], [%[out]], %[kStep] \n"
+ "vst1.16 d1[2], [%[out]], %[kStep] \n"
+ "vst1.16 d2[2], [%[out]], %[kStep] \n"
+ "vst1.16 d3[2], [%[out]], %[kStep] \n"
+ "vst1.16 d0[3], [%[out]], %[kStep] \n"
+ "vst1.16 d1[3], [%[out]], %[kStep] \n"
+ "vst1.16 d2[3], [%[out]], %[kStep] \n"
+ "vst1.16 d3[3], [%[out]], %[kStep] \n"
+
+ : [out] "+r"(out) // modified registers
+ : [in] "r"(in), [kStep] "r"(kStep) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4",
+ "q5", "q6", "q7", "q8", "q9" // clobbered
+ );
+}
+
+// Forward transform.
+
+// adapted from vp8/encoder/arm/neon/shortfdct_neon.asm
+static const int16_t kCoeff16[] = {
+ 5352, 5352, 5352, 5352, 2217, 2217, 2217, 2217
+};
+static const int32_t kCoeff32[] = {
+ 1812, 1812, 1812, 1812,
+ 937, 937, 937, 937,
+ 12000, 12000, 12000, 12000,
+ 51000, 51000, 51000, 51000
+};
+
+static void FTransform(const uint8_t* src, const uint8_t* ref,
+ int16_t* out) {
+ const int kBPS = BPS;
+ const uint8_t* src_ptr = src;
+ const uint8_t* ref_ptr = ref;
+ const int16_t* coeff16 = kCoeff16;
+ const int32_t* coeff32 = kCoeff32;
+
+ __asm__ volatile (
+ // load src into q4, q5 in high half
+ "vld1.8 {d8}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d10}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d9}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d11}, [%[src_ptr]] \n"
+
+ // load ref into q6, q7 in high half
+ "vld1.8 {d12}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d14}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d13}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d15}, [%[ref_ptr]] \n"
+
+ // Pack the high values in to q4 and q6
+ "vtrn.32 q4, q5 \n"
+ "vtrn.32 q6, q7 \n"
+
+ // d[0-3] = src - ref
+ "vsubl.u8 q0, d8, d12 \n"
+ "vsubl.u8 q1, d9, d13 \n"
+
+ // load coeff16 into q8(d16=5352, d17=2217)
+ "vld1.16 {q8}, [%[coeff16]] \n"
+
+ // load coeff32 high half into q9 = 1812, q10 = 937
+ "vld1.32 {q9, q10}, [%[coeff32]]! \n"
+
+ // load coeff32 low half into q11=12000, q12=51000
+ "vld1.32 {q11,q12}, [%[coeff32]] \n"
+
+ // part 1
+ // Transpose. Register dN is the same as dN in C
+ "vtrn.32 d0, d2 \n"
+ "vtrn.32 d1, d3 \n"
+ "vtrn.16 d0, d1 \n"
+ "vtrn.16 d2, d3 \n"
+
+ "vadd.s16 d4, d0, d3 \n" // a0 = d0 + d3
+ "vadd.s16 d5, d1, d2 \n" // a1 = d1 + d2
+ "vsub.s16 d6, d1, d2 \n" // a2 = d1 - d2
+ "vsub.s16 d7, d0, d3 \n" // a3 = d0 - d3
+
+ "vadd.s16 d0, d4, d5 \n" // a0 + a1
+ "vshl.s16 d0, d0, #3 \n" // temp[0+i*4] = (a0+a1) << 3
+ "vsub.s16 d2, d4, d5 \n" // a0 - a1
+ "vshl.s16 d2, d2, #3 \n" // (temp[2+i*4] = (a0-a1) << 3
+
+ "vmlal.s16 q9, d7, d16 \n" // a3*5352 + 1812
+ "vmlal.s16 q10, d7, d17 \n" // a3*2217 + 937
+ "vmlal.s16 q9, d6, d17 \n" // a2*2217 + a3*5352 + 1812
+ "vmlsl.s16 q10, d6, d16 \n" // a3*2217 + 937 - a2*5352
+
+ // temp[1+i*4] = (d2*2217 + d3*5352 + 1812) >> 9
+ // temp[3+i*4] = (d3*2217 + 937 - d2*5352) >> 9
+ "vshrn.s32 d1, q9, #9 \n"
+ "vshrn.s32 d3, q10, #9 \n"
+
+ // part 2
+ // transpose d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
+ "vtrn.32 d0, d2 \n"
+ "vtrn.32 d1, d3 \n"
+ "vtrn.16 d0, d1 \n"
+ "vtrn.16 d2, d3 \n"
+
+ "vmov.s16 d26, #7 \n"
+
+ "vadd.s16 d4, d0, d3 \n" // a1 = ip[0] + ip[12]
+ "vadd.s16 d5, d1, d2 \n" // b1 = ip[4] + ip[8]
+ "vsub.s16 d6, d1, d2 \n" // c1 = ip[4] - ip[8]
+ "vadd.s16 d4, d4, d26 \n" // a1 + 7
+ "vsub.s16 d7, d0, d3 \n" // d1 = ip[0] - ip[12]
+
+ "vadd.s16 d0, d4, d5 \n" // op[0] = a1 + b1 + 7
+ "vsub.s16 d2, d4, d5 \n" // op[8] = a1 - b1 + 7
+
+ "vmlal.s16 q11, d7, d16 \n" // d1*5352 + 12000
+ "vmlal.s16 q12, d7, d17 \n" // d1*2217 + 51000
+
+ "vceq.s16 d4, d7, #0 \n"
+
+ "vshr.s16 d0, d0, #4 \n"
+ "vshr.s16 d2, d2, #4 \n"
+
+ "vmlal.s16 q11, d6, d17 \n" // c1*2217 + d1*5352 + 12000
+ "vmlsl.s16 q12, d6, d16 \n" // d1*2217 - c1*5352 + 51000
+
+ "vmvn.s16 d4, d4 \n"
+ // op[4] = (c1*2217 + d1*5352 + 12000)>>16
+ "vshrn.s32 d1, q11, #16 \n"
+ // op[4] += (d1!=0)
+ "vsub.s16 d1, d1, d4 \n"
+ // op[12]= (d1*2217 - c1*5352 + 51000)>>16
+ "vshrn.s32 d3, q12, #16 \n"
+
+ // set result to out array
+ "vst1.16 {q0, q1}, [%[out]] \n"
+ : [src_ptr] "+r"(src_ptr), [ref_ptr] "+r"(ref_ptr),
+ [coeff32] "+r"(coeff32) // modified registers
+ : [kBPS] "r"(kBPS), [coeff16] "r"(coeff16),
+ [out] "r"(out) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
+ "q10", "q11", "q12", "q13" // clobbered
+ );
+}
+
+static void FTransformWHT(const int16_t* in, int16_t* out) {
+ const int kStep = 32;
+ __asm__ volatile (
+ // d0 = in[0 * 16] , d1 = in[1 * 16]
+ // d2 = in[2 * 16] , d3 = in[3 * 16]
+ "vld1.16 d0[0], [%[in]], %[kStep] \n"
+ "vld1.16 d1[0], [%[in]], %[kStep] \n"
+ "vld1.16 d2[0], [%[in]], %[kStep] \n"
+ "vld1.16 d3[0], [%[in]], %[kStep] \n"
+ "vld1.16 d0[1], [%[in]], %[kStep] \n"
+ "vld1.16 d1[1], [%[in]], %[kStep] \n"
+ "vld1.16 d2[1], [%[in]], %[kStep] \n"
+ "vld1.16 d3[1], [%[in]], %[kStep] \n"
+ "vld1.16 d0[2], [%[in]], %[kStep] \n"
+ "vld1.16 d1[2], [%[in]], %[kStep] \n"
+ "vld1.16 d2[2], [%[in]], %[kStep] \n"
+ "vld1.16 d3[2], [%[in]], %[kStep] \n"
+ "vld1.16 d0[3], [%[in]], %[kStep] \n"
+ "vld1.16 d1[3], [%[in]], %[kStep] \n"
+ "vld1.16 d2[3], [%[in]], %[kStep] \n"
+ "vld1.16 d3[3], [%[in]], %[kStep] \n"
+
+ "vaddl.s16 q2, d0, d2 \n"
+ "vshl.s32 q2, q2, #2 \n" // a0=(in[0*16]+in[2*16])<<2
+ "vaddl.s16 q3, d1, d3 \n"
+ "vshl.s32 q3, q3, #2 \n" // a1=(in[1*16]+in[3*16])<<2
+ "vsubl.s16 q4, d1, d3 \n"
+ "vshl.s32 q4, q4, #2 \n" // a2=(in[1*16]-in[3*16])<<2
+ "vsubl.s16 q5, d0, d2 \n"
+ "vshl.s32 q5, q5, #2 \n" // a3=(in[0*16]-in[2*16])<<2
+
+ "vceq.s32 q10, q2, #0 \n"
+ "vmvn.s32 q10, q10 \n" // (a0 != 0)
+ "vqadd.s32 q6, q2, q3 \n" // (a0 + a1)
+ "vqsub.s32 q6, q6, q10 \n" // (a0 + a1) + (a0 != 0)
+ "vqadd.s32 q7, q5, q4 \n" // a3 + a2
+ "vqsub.s32 q8, q5, q4 \n" // a3 - a2
+ "vqsub.s32 q9, q2, q3 \n" // a0 - a1
+
+ // Transpose
+ // q6 = tmp[0, 1, 2, 3] ; q7 = tmp[ 4, 5, 6, 7]
+ // q8 = tmp[8, 9, 10, 11] ; q9 = tmp[12, 13, 14, 15]
+ "vswp d13, d16 \n" // vtrn.64 q0, q2
+ "vswp d15, d18 \n" // vtrn.64 q1, q3
+ "vtrn.32 q6, q7 \n"
+ "vtrn.32 q8, q9 \n"
+
+ "vqadd.s32 q0, q6, q8 \n" // a0 = tmp[0] + tmp[8]
+ "vqadd.s32 q1, q7, q9 \n" // a1 = tmp[4] + tmp[12]
+ "vqsub.s32 q2, q7, q9 \n" // a2 = tmp[4] - tmp[12]
+ "vqsub.s32 q3, q6, q8 \n" // a3 = tmp[0] - tmp[8]
+
+ "vqadd.s32 q4, q0, q1 \n" // b0 = a0 + a1
+ "vqadd.s32 q5, q3, q2 \n" // b1 = a3 + a2
+ "vqsub.s32 q6, q3, q2 \n" // b2 = a3 - a2
+ "vqsub.s32 q7, q0, q1 \n" // b3 = a0 - a1
+
+ "vmov.s32 q0, #3 \n" // q0 = 3
+
+ "vcgt.s32 q1, q4, #0 \n" // (b0>0)
+ "vqsub.s32 q2, q4, q1 \n" // (b0+(b0>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b0+(b0>0)+3)
+ "vshrn.s32 d18, q3, #3 \n" // (b0+(b0>0)+3) >> 3
+
+ "vcgt.s32 q1, q5, #0 \n" // (b1>0)
+ "vqsub.s32 q2, q5, q1 \n" // (b1+(b1>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b1+(b1>0)+3)
+ "vshrn.s32 d19, q3, #3 \n" // (b1+(b1>0)+3) >> 3
+
+ "vcgt.s32 q1, q6, #0 \n" // (b2>0)
+ "vqsub.s32 q2, q6, q1 \n" // (b2+(b2>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b2+(b2>0)+3)
+ "vshrn.s32 d20, q3, #3 \n" // (b2+(b2>0)+3) >> 3
+
+ "vcgt.s32 q1, q7, #0 \n" // (b3>0)
+ "vqsub.s32 q2, q7, q1 \n" // (b3+(b3>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b3+(b3>0)+3)
+ "vshrn.s32 d21, q3, #3 \n" // (b3+(b3>0)+3) >> 3
+
+ "vst1.16 {q9, q10}, [%[out]] \n"
+
+ : [in] "+r"(in)
+ : [kStep] "r"(kStep), [out] "r"(out)
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5",
+ "q6", "q7", "q8", "q9", "q10" // clobbered
+  );
+}
+
+//------------------------------------------------------------------------------
+// Texture distortion
+//
+// We try to match the spectral content (weighted) between source and
+// reconstructed samples.
+
+// Hadamard transform
+// Returns the weighted sum of the absolute value of transformed coefficients.
+// This uses a TTransform helper function in C
+static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ const int kBPS = BPS;
+ const uint8_t* A = a;
+ const uint8_t* B = b;
+ const uint16_t* W = w;
+ int sum;
+ __asm__ volatile (
+ "vld1.32 d0[0], [%[a]], %[kBPS] \n"
+ "vld1.32 d0[1], [%[a]], %[kBPS] \n"
+ "vld1.32 d2[0], [%[a]], %[kBPS] \n"
+ "vld1.32 d2[1], [%[a]] \n"
+
+ "vld1.32 d1[0], [%[b]], %[kBPS] \n"
+ "vld1.32 d1[1], [%[b]], %[kBPS] \n"
+ "vld1.32 d3[0], [%[b]], %[kBPS] \n"
+ "vld1.32 d3[1], [%[b]] \n"
+
+ // a d0/d2, b d1/d3
+ // d0/d1: 01 01 01 01
+ // d2/d3: 23 23 23 23
+ // But: it goes 01 45 23 67
+ // Notice the middle values are transposed
+ "vtrn.16 q0, q1 \n"
+
+ // {a0, a1} = {in[0] + in[2], in[1] + in[3]}
+ "vaddl.u8 q2, d0, d2 \n"
+ "vaddl.u8 q10, d1, d3 \n"
+ // {a3, a2} = {in[0] - in[2], in[1] - in[3]}
+ "vsubl.u8 q3, d0, d2 \n"
+ "vsubl.u8 q11, d1, d3 \n"
+
+ // tmp[0] = a0 + a1
+ "vpaddl.s16 q0, q2 \n"
+ "vpaddl.s16 q8, q10 \n"
+
+ // tmp[1] = a3 + a2
+ "vpaddl.s16 q1, q3 \n"
+ "vpaddl.s16 q9, q11 \n"
+
+ // No pair subtract
+ // q2 = {a0, a3}
+ // q3 = {a1, a2}
+ "vtrn.16 q2, q3 \n"
+ "vtrn.16 q10, q11 \n"
+
+ // {tmp[3], tmp[2]} = {a0 - a1, a3 - a2}
+ "vsubl.s16 q12, d4, d6 \n"
+ "vsubl.s16 q13, d5, d7 \n"
+ "vsubl.s16 q14, d20, d22 \n"
+ "vsubl.s16 q15, d21, d23 \n"
+
+ // separate tmp[3] and tmp[2]
+ // q12 = tmp[3]
+ // q13 = tmp[2]
+ "vtrn.32 q12, q13 \n"
+ "vtrn.32 q14, q15 \n"
+
+ // Transpose tmp for a
+ "vswp d1, d26 \n" // vtrn.64
+ "vswp d3, d24 \n" // vtrn.64
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q13, q12 \n"
+
+ // Transpose tmp for b
+ "vswp d17, d30 \n" // vtrn.64
+ "vswp d19, d28 \n" // vtrn.64
+ "vtrn.32 q8, q9 \n"
+ "vtrn.32 q15, q14 \n"
+
+ // The first Q register is a, the second b.
+ // q0/8 tmp[0-3]
+ // q13/15 tmp[4-7]
+ // q1/9 tmp[8-11]
+ // q12/14 tmp[12-15]
+
+    // These are still in 01 45 23 67 order. We fix it easily in the addition
+    // case but the subtraction propagates them.
+ "vswp d3, d27 \n"
+ "vswp d19, d31 \n"
+
+ // a0 = tmp[0] + tmp[8]
+ "vadd.s32 q2, q0, q1 \n"
+ "vadd.s32 q3, q8, q9 \n"
+
+ // a1 = tmp[4] + tmp[12]
+ "vadd.s32 q10, q13, q12 \n"
+ "vadd.s32 q11, q15, q14 \n"
+
+ // a2 = tmp[4] - tmp[12]
+ "vsub.s32 q13, q13, q12 \n"
+ "vsub.s32 q15, q15, q14 \n"
+
+ // a3 = tmp[0] - tmp[8]
+ "vsub.s32 q0, q0, q1 \n"
+ "vsub.s32 q8, q8, q9 \n"
+
+ // b0 = a0 + a1
+ "vadd.s32 q1, q2, q10 \n"
+ "vadd.s32 q9, q3, q11 \n"
+
+ // b1 = a3 + a2
+ "vadd.s32 q12, q0, q13 \n"
+ "vadd.s32 q14, q8, q15 \n"
+
+ // b2 = a3 - a2
+ "vsub.s32 q0, q0, q13 \n"
+ "vsub.s32 q8, q8, q15 \n"
+
+ // b3 = a0 - a1
+ "vsub.s32 q2, q2, q10 \n"
+ "vsub.s32 q3, q3, q11 \n"
+
+ "vld1.64 {q10, q11}, [%[w]] \n"
+
+ // abs(b0)
+ "vabs.s32 q1, q1 \n"
+ "vabs.s32 q9, q9 \n"
+ // abs(b1)
+ "vabs.s32 q12, q12 \n"
+ "vabs.s32 q14, q14 \n"
+ // abs(b2)
+ "vabs.s32 q0, q0 \n"
+ "vabs.s32 q8, q8 \n"
+ // abs(b3)
+ "vabs.s32 q2, q2 \n"
+ "vabs.s32 q3, q3 \n"
+
+ // expand w before using.
+ "vmovl.u16 q13, d20 \n"
+ "vmovl.u16 q15, d21 \n"
+
+ // w[0] * abs(b0)
+ "vmul.u32 q1, q1, q13 \n"
+ "vmul.u32 q9, q9, q13 \n"
+
+ // w[4] * abs(b1)
+ "vmla.u32 q1, q12, q15 \n"
+ "vmla.u32 q9, q14, q15 \n"
+
+ // expand w before using.
+ "vmovl.u16 q13, d22 \n"
+ "vmovl.u16 q15, d23 \n"
+
+    // w[8] * abs(b2)
+ "vmla.u32 q1, q0, q13 \n"
+ "vmla.u32 q9, q8, q13 \n"
+
+    // w[12] * abs(b3)
+ "vmla.u32 q1, q2, q15 \n"
+ "vmla.u32 q9, q3, q15 \n"
+
+ // Sum the arrays
+ "vpaddl.u32 q1, q1 \n"
+ "vpaddl.u32 q9, q9 \n"
+ "vadd.u64 d2, d3 \n"
+ "vadd.u64 d18, d19 \n"
+
+    // Hadamard transform needs 4 bits of extra precision (2 bits in each
+    // direction) for dynamic range. Weights w[] are 16 bits at most, so the
+    // maximum precision for coeff is 8 bits of input + 4 bits of Hadamard
+    // transform + 16 bits for w[] + 2 bits of abs() summation.
+ //
+ // This uses a maximum of 31 bits (signed). Discarding the top 32 bits is
+ // A-OK.
+
+ // sum2 - sum1
+ "vsub.u32 d0, d2, d18 \n"
+ // abs(sum2 - sum1)
+ "vabs.s32 d0, d0 \n"
+ // abs(sum2 - sum1) >> 5
+ "vshr.u32 d0, #5 \n"
+
+ // It would be better to move the value straight into r0 but I'm not
+ // entirely sure how this works with inline assembly.
+ "vmov.32 %[sum], d0[0] \n"
+
+ : [sum] "=r"(sum), [a] "+r"(A), [b] "+r"(B), [w] "+r"(W)
+ : [kBPS] "r"(kBPS)
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
+ "q10", "q11", "q12", "q13", "q14", "q15" // clobbered
+  );
+
+ return sum;
+}
+
+static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ int D = 0;
+ int x, y;
+ for (y = 0; y < 16 * BPS; y += 4 * BPS) {
+ for (x = 0; x < 16; x += 4) {
+ D += Disto4x4(a + x + y, b + x + y, w);
+ }
+ }
+ return D;
+}
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspInitNEON(void);
+
+void VP8EncDspInitNEON(void) {
+#if defined(WEBP_USE_NEON)
+ VP8ITransform = ITransform;
+ VP8FTransform = FTransform;
+
+ VP8ITransformWHT = ITransformWHT;
+ VP8FTransformWHT = FTransformWHT;
+
+ VP8TDisto4x4 = Disto4x4;
+ VP8TDisto16x16 = Disto16x16;
+#endif // WEBP_USE_NEON
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/dsp/enc_sse2.c b/src/dsp/enc_sse2.c
index b046761d..c4148b56 100644
--- a/src/dsp/enc_sse2.c
+++ b/src/dsp/enc_sse2.c
@@ -11,27 +11,58 @@
#include "./dsp.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#if defined(WEBP_USE_SSE2)
#include <stdlib.h> // for abs()
#include <emmintrin.h>
#include "../enc/vp8enci.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
+//------------------------------------------------------------------------------
+// Quite useful debugging helper. Left here for convenience.
+
+#if 0
+#include <stdio.h>
+static void PrintReg(const __m128i r, const char* const name, int size) {
+ int n;
+ union {
+ __m128i r;
+ uint8_t i8[16];
+ uint16_t i16[8];
+ uint32_t i32[4];
+ uint64_t i64[2];
+ } tmp;
+ tmp.r = r;
+ printf("%s\t: ", name);
+ if (size == 8) {
+ for (n = 0; n < 16; ++n) printf("%.2x ", tmp.i8[n]);
+ } else if (size == 16) {
+ for (n = 0; n < 8; ++n) printf("%.4x ", tmp.i16[n]);
+ } else if (size == 32) {
+ for (n = 0; n < 4; ++n) printf("%.8x ", tmp.i32[n]);
+ } else {
+    for (n = 0; n < 2; ++n)
+      printf("%.16llx ", (unsigned long long)tmp.i64[n]);
+ }
+ printf("\n");
+}
+#endif
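
Note: a hypothetical debugging call, after flipping the #if 0 guard:

    PrintReg(v01, "v01", 16);  /* dumps the eight 16-bit lanes of v01 */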
//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
-static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block) {
- int histo[MAX_COEFF_THRESH + 1] = { 0 };
- int16_t out[16];
- int j, k;
+static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ VP8Histogram* const histo) {
const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
+ int j;
for (j = start_block; j < end_block; ++j) {
+ int16_t out[16];
+ int k;
+
VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
// Convert coefficients to bin (within out[]).
@@ -47,9 +78,9 @@ static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
const __m128i xor1 = _mm_xor_si128(out1, sign1);
const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
- // v = abs(out) >> 2
- const __m128i v0 = _mm_srai_epi16(abs0, 2);
- const __m128i v1 = _mm_srai_epi16(abs1, 2);
+ // v = abs(out) >> 3
+ const __m128i v0 = _mm_srai_epi16(abs0, 3);
+ const __m128i v1 = _mm_srai_epi16(abs1, 3);
// bin = min(v, MAX_COEFF_THRESH)
const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
@@ -58,13 +89,11 @@ static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
_mm_storeu_si128((__m128i*)&out[8], bin1);
}
- // Use bin to update histogram.
+    // Use the bin values to update the histogram.
for (k = 0; k < 16; ++k) {
- histo[out[k]]++;
+ histo->distribution[out[k]]++;
}
}
-
- return VP8GetAlpha(histo);
}
//------------------------------------------------------------------------------
@@ -243,7 +272,7 @@ static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
// Add inverse transform to 'ref' and store.
{
- const __m128i zero = _mm_set1_epi16(0);
+ const __m128i zero = _mm_setzero_si128();
// Load the reference(s).
__m128i ref0, ref1, ref2, ref3;
if (do_two) {
@@ -295,17 +324,23 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
int16_t* out) {
const __m128i zero = _mm_setzero_si128();
const __m128i seven = _mm_set1_epi16(7);
- const __m128i k7500 = _mm_set1_epi32(7500);
- const __m128i k14500 = _mm_set1_epi32(14500);
+ const __m128i k937 = _mm_set1_epi32(937);
+ const __m128i k1812 = _mm_set1_epi32(1812);
const __m128i k51000 = _mm_set1_epi32(51000);
const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217,
5352, 2217, 5352, 2217);
const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
2217, -5352, 2217, -5352);
-
+ const __m128i k88p = _mm_set_epi16(8, 8, 8, 8, 8, 8, 8, 8);
+ const __m128i k88m = _mm_set_epi16(-8, 8, -8, 8, -8, 8, -8, 8);
+ const __m128i k5352_2217p = _mm_set_epi16(2217, 5352, 2217, 5352,
+ 2217, 5352, 2217, 5352);
+ const __m128i k5352_2217m = _mm_set_epi16(-5352, 2217, -5352, 2217,
+ -5352, 2217, -5352, 2217);
__m128i v01, v32;
+
// Difference between src and ref and initial transpose.
{
// Load src and convert to 16b.
@@ -326,73 +361,52 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
- // Compute difference.
+ // Compute difference. -> 00 01 02 03 00 00 00 00
const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);
- // Transpose.
+
+ // Unpack and shuffle
// 00 01 02 03 0 0 0 0
// 10 11 12 13 0 0 0 0
// 20 21 22 23 0 0 0 0
// 30 31 32 33 0 0 0 0
- const __m128i transpose0_0 = _mm_unpacklo_epi16(diff0, diff1);
- const __m128i transpose0_1 = _mm_unpacklo_epi16(diff2, diff3);
- // 00 10 01 11 02 12 03 13
- // 20 30 21 31 22 32 23 33
- const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
- v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
- v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
- // a02 a12 a22 a32 a03 a13 a23 a33
- // a00 a10 a20 a30 a01 a11 a21 a31
- // a03 a13 a23 a33 a02 a12 a22 a32
- }
-
- // First pass and subsequent transpose.
- {
- // Same operations are done on the (0,3) and (1,2) pairs.
- // b0 = (a0 + a3) << 3
- // b1 = (a1 + a2) << 3
- // b3 = (a0 - a3) << 3
- // b2 = (a1 - a2) << 3
- const __m128i a01 = _mm_add_epi16(v01, v32);
- const __m128i a32 = _mm_sub_epi16(v01, v32);
- const __m128i b01 = _mm_slli_epi16(a01, 3);
- const __m128i b32 = _mm_slli_epi16(a32, 3);
- const __m128i b11 = _mm_unpackhi_epi64(b01, b01);
- const __m128i b22 = _mm_unpackhi_epi64(b32, b32);
-
- // e0 = b0 + b1
- // e2 = b0 - b1
- const __m128i e0 = _mm_add_epi16(b01, b11);
- const __m128i e2 = _mm_sub_epi16(b01, b11);
- const __m128i e02 = _mm_unpacklo_epi64(e0, e2);
-
- // e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12
- // e3 = (b3 * 2217 - b2 * 5352 + 7500) >> 12
- const __m128i b23 = _mm_unpacklo_epi16(b22, b32);
- const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
- const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
- const __m128i d1 = _mm_add_epi32(c1, k14500);
- const __m128i d3 = _mm_add_epi32(c3, k7500);
- const __m128i e1 = _mm_srai_epi32(d1, 12);
- const __m128i e3 = _mm_srai_epi32(d3, 12);
- const __m128i e13 = _mm_packs_epi32(e1, e3);
-
- // Transpose.
- // 00 01 02 03 20 21 22 23
- // 10 11 12 13 30 31 32 33
- const __m128i transpose0_0 = _mm_unpacklo_epi16(e02, e13);
- const __m128i transpose0_1 = _mm_unpackhi_epi16(e02, e13);
- // 00 10 01 11 02 12 03 13
- // 20 30 21 31 22 32 23 33
- const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
- v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
- v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
- // 02 12 22 32 03 13 23 33
- // 00 10 20 30 01 11 21 31
- // 03 13 23 33 02 12 22 32
+ const __m128i shuf01 = _mm_unpacklo_epi32(diff0, diff1);
+ const __m128i shuf23 = _mm_unpacklo_epi32(diff2, diff3);
+ // 00 01 10 11 02 03 12 13
+ // 20 21 30 31 22 23 32 33
+ const __m128i shuf01_p =
+ _mm_shufflehi_epi16(shuf01, _MM_SHUFFLE(2, 3, 0, 1));
+ const __m128i shuf23_p =
+ _mm_shufflehi_epi16(shuf23, _MM_SHUFFLE(2, 3, 0, 1));
+ // 00 01 10 11 03 02 13 12
+ // 20 21 30 31 23 22 33 32
+ const __m128i s01 = _mm_unpacklo_epi64(shuf01_p, shuf23_p);
+ const __m128i s32 = _mm_unpackhi_epi64(shuf01_p, shuf23_p);
+ // 00 01 10 11 20 21 30 31
+ // 03 02 13 12 23 22 33 32
+ const __m128i a01 = _mm_add_epi16(s01, s32);
+ const __m128i a32 = _mm_sub_epi16(s01, s32);
+ // [d0 + d3 | d1 + d2 | ...] = [a0 a1 | a0' a1' | ... ]
+ // [d0 - d3 | d1 - d2 | ...] = [a3 a2 | a3' a2' | ... ]
+
+ const __m128i tmp0 = _mm_madd_epi16(a01, k88p); // [ (a0 + a1) << 3, ... ]
+ const __m128i tmp2 = _mm_madd_epi16(a01, k88m); // [ (a0 - a1) << 3, ... ]
+ const __m128i tmp1_1 = _mm_madd_epi16(a32, k5352_2217p);
+ const __m128i tmp3_1 = _mm_madd_epi16(a32, k5352_2217m);
+ const __m128i tmp1_2 = _mm_add_epi32(tmp1_1, k1812);
+ const __m128i tmp3_2 = _mm_add_epi32(tmp3_1, k937);
+ const __m128i tmp1 = _mm_srai_epi32(tmp1_2, 9);
+ const __m128i tmp3 = _mm_srai_epi32(tmp3_2, 9);
+ const __m128i s03 = _mm_packs_epi32(tmp0, tmp2);
+ const __m128i s12 = _mm_packs_epi32(tmp1, tmp3);
+ const __m128i s_lo = _mm_unpacklo_epi16(s03, s12); // 0 1 0 1 0 1...
+ const __m128i s_hi = _mm_unpackhi_epi16(s03, s12); // 2 3 2 3 2 3
+ const __m128i v23 = _mm_unpackhi_epi32(s_lo, s_hi);
+ v01 = _mm_unpacklo_epi32(s_lo, s_hi);
+ v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); // 3 2 3 2 3 2..
}
// Second pass
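Note: the new rounding constants come from folding the former "<< 3" pre-scaling
into the multipliers. A scalar sketch of the equivalence, using the a0..a3 names
from the comments above (annotation only, not part of the patch):

    /* old: b = a << 3, then
     *   e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12
     *   e3 = (b3 * 2217 - b2 * 5352 +  7500) >> 12
     * new: the x8 factor lives in the madd constants instead:
     *   tmp0 = 8 * a0 + 8 * a1;                      // k88p == (a0 + a1) << 3
     *   tmp1 = (a3 * 5352 + a2 * 2217 + 1812) >> 9;  // 14500 / 8 -> 1812
     *   tmp3 = (a3 * 2217 - a2 * 5352 +  937) >> 9;  //  7500 / 8 ->  937
     * Since (x << 3) >> 12 == x >> 9, only the rounding bias moves, and only
     * by the half-unit lost in 14500 / 8 = 1812.5 and 7500 / 8 = 937.5. */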
@@ -406,13 +420,12 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i a32 = _mm_sub_epi16(v01, v32);
const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
const __m128i a22 = _mm_unpackhi_epi64(a32, a32);
+ const __m128i a01_plus_7 = _mm_add_epi16(a01, seven);
// d0 = (a0 + a1 + 7) >> 4;
// d2 = (a0 - a1 + 7) >> 4;
- const __m128i b0 = _mm_add_epi16(a01, a11);
- const __m128i b2 = _mm_sub_epi16(a01, a11);
- const __m128i c0 = _mm_add_epi16(b0, seven);
- const __m128i c2 = _mm_add_epi16(b2, seven);
+ const __m128i c0 = _mm_add_epi16(a01_plus_7, a11);
+ const __m128i c2 = _mm_sub_epi16(a01_plus_7, a11);
const __m128i d0 = _mm_srai_epi16(c0, 4);
const __m128i d2 = _mm_srai_epi16(c2, 4);
@@ -430,6 +443,7 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
// f1 = f1 + (a3 != 0);
// The compare will return (0xffff, 0) for (==0, !=0). To turn that into the
// desired (0, 1), we add one earlier through k12000_plus_one.
+ // -> f1 = f1 + 1 - (a3 == 0)
const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));
_mm_storel_epi64((__m128i*)&out[ 0], d0);
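Unrolling the comment above (annotation): _mm_cmpeq_epi16 produces 0xffff == -1
per lane where a3 == 0, and k12000_plus_one already contributed a +1 to f1
through its (1 << 16) term, so:

    // g1 = (f1_base + 1) + (a3 == 0 ? -1 : 0)
    //    = f1_base + (a3 != 0)            // the desired 0/1 increment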
@@ -442,10 +456,101 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
//------------------------------------------------------------------------------
// Metric
+static int SSE_Nx4SSE2(const uint8_t* a, const uint8_t* b,
+ int num_quads, int do_16) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i sum1 = zero;
+ __m128i sum2 = zero;
+
+ while (num_quads-- > 0) {
+ // Note: for the !do_16 case, we read 16 pixels instead of 8 but that's ok,
+ // thanks to buffer over-allocation to that effect.
+ const __m128i a0 = _mm_loadu_si128((__m128i*)&a[BPS * 0]);
+ const __m128i a1 = _mm_loadu_si128((__m128i*)&a[BPS * 1]);
+ const __m128i a2 = _mm_loadu_si128((__m128i*)&a[BPS * 2]);
+ const __m128i a3 = _mm_loadu_si128((__m128i*)&a[BPS * 3]);
+ const __m128i b0 = _mm_loadu_si128((__m128i*)&b[BPS * 0]);
+ const __m128i b1 = _mm_loadu_si128((__m128i*)&b[BPS * 1]);
+ const __m128i b2 = _mm_loadu_si128((__m128i*)&b[BPS * 2]);
+ const __m128i b3 = _mm_loadu_si128((__m128i*)&b[BPS * 3]);
+
+ // compute clip0(a-b) and clip0(b-a)
+ const __m128i a0p = _mm_subs_epu8(a0, b0);
+ const __m128i a0m = _mm_subs_epu8(b0, a0);
+ const __m128i a1p = _mm_subs_epu8(a1, b1);
+ const __m128i a1m = _mm_subs_epu8(b1, a1);
+ const __m128i a2p = _mm_subs_epu8(a2, b2);
+ const __m128i a2m = _mm_subs_epu8(b2, a2);
+ const __m128i a3p = _mm_subs_epu8(a3, b3);
+ const __m128i a3m = _mm_subs_epu8(b3, a3);
+
+ // compute |a-b| with 8b arithmetic as clip0(a-b) | clip0(b-a)
+ const __m128i diff0 = _mm_or_si128(a0p, a0m);
+ const __m128i diff1 = _mm_or_si128(a1p, a1m);
+ const __m128i diff2 = _mm_or_si128(a2p, a2m);
+ const __m128i diff3 = _mm_or_si128(a3p, a3m);
+
+ // unpack (only four operations, instead of eight)
+ const __m128i low0 = _mm_unpacklo_epi8(diff0, zero);
+ const __m128i low1 = _mm_unpacklo_epi8(diff1, zero);
+ const __m128i low2 = _mm_unpacklo_epi8(diff2, zero);
+ const __m128i low3 = _mm_unpacklo_epi8(diff3, zero);
+
+ // multiply with self
+ const __m128i low_madd0 = _mm_madd_epi16(low0, low0);
+ const __m128i low_madd1 = _mm_madd_epi16(low1, low1);
+ const __m128i low_madd2 = _mm_madd_epi16(low2, low2);
+ const __m128i low_madd3 = _mm_madd_epi16(low3, low3);
+
+ // collect in a cascading way
+ const __m128i low_sum0 = _mm_add_epi32(low_madd0, low_madd1);
+ const __m128i low_sum1 = _mm_add_epi32(low_madd2, low_madd3);
+ sum1 = _mm_add_epi32(sum1, low_sum0);
+ sum2 = _mm_add_epi32(sum2, low_sum1);
+
+ if (do_16) { // if necessary, process the higher 8 bytes similarly
+ const __m128i hi0 = _mm_unpackhi_epi8(diff0, zero);
+ const __m128i hi1 = _mm_unpackhi_epi8(diff1, zero);
+ const __m128i hi2 = _mm_unpackhi_epi8(diff2, zero);
+ const __m128i hi3 = _mm_unpackhi_epi8(diff3, zero);
+
+ const __m128i hi_madd0 = _mm_madd_epi16(hi0, hi0);
+ const __m128i hi_madd1 = _mm_madd_epi16(hi1, hi1);
+ const __m128i hi_madd2 = _mm_madd_epi16(hi2, hi2);
+ const __m128i hi_madd3 = _mm_madd_epi16(hi3, hi3);
+ const __m128i hi_sum0 = _mm_add_epi32(hi_madd0, hi_madd1);
+ const __m128i hi_sum1 = _mm_add_epi32(hi_madd2, hi_madd3);
+ sum1 = _mm_add_epi32(sum1, hi_sum0);
+ sum2 = _mm_add_epi32(sum2, hi_sum1);
+ }
+ a += 4 * BPS;
+ b += 4 * BPS;
+ }
+ {
+ int32_t tmp[4];
+ const __m128i sum = _mm_add_epi32(sum1, sum2);
+ _mm_storeu_si128((__m128i*)tmp, sum);
+ return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
+ }
+}
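The saturating-subtract pair avoids widening to 16 bits before taking the
absolute difference. A one-byte scalar equivalent (sketch, annotation only):

    static uint8_t abs_diff_u8(uint8_t a, uint8_t b) {
      const uint8_t ap = (a > b) ? (a - b) : 0;  // clip0(a - b), cf. _mm_subs_epu8(a, b)
      const uint8_t am = (b > a) ? (b - a) : 0;  // clip0(b - a)
      return ap | am;  // one operand is always 0, so OR == |a - b|
    }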
+
+static int SSE16x16SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 4, 1);
+}
+
+static int SSE16x8SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 2, 1);
+}
+
+static int SSE8x8SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 2, 0);
+}
+
static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
- const __m128i zero = _mm_set1_epi16(0);
+ const __m128i zero = _mm_setzero_si128();
- // Load values.
+ // Load values. Note that we read 8 pixels instead of 4,
+ // but the a/b buffers are over-allocated to that effect.
const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]);
const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]);
const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]);
@@ -483,6 +588,7 @@ static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
const __m128i sum0 = _mm_add_epi32(madd0, madd1);
const __m128i sum1 = _mm_add_epi32(madd2, madd3);
const __m128i sum2 = _mm_add_epi32(sum0, sum1);
+
int32_t tmp[4];
_mm_storeu_si128((__m128i*)tmp, sum2);
return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
@@ -502,8 +608,6 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
int32_t sum[4];
__m128i tmp_0, tmp_1, tmp_2, tmp_3;
const __m128i zero = _mm_setzero_si128();
- const __m128i one = _mm_set1_epi16(1);
- const __m128i three = _mm_set1_epi16(3);
// Load, combine and transpose inputs.
{
@@ -550,17 +654,14 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
// Horizontal pass and subsequent transpose.
{
// Calculate a and b (two 4x4 at once).
- const __m128i a0 = _mm_slli_epi16(_mm_add_epi16(tmp_0, tmp_2), 2);
- const __m128i a1 = _mm_slli_epi16(_mm_add_epi16(tmp_1, tmp_3), 2);
- const __m128i a2 = _mm_slli_epi16(_mm_sub_epi16(tmp_1, tmp_3), 2);
- const __m128i a3 = _mm_slli_epi16(_mm_sub_epi16(tmp_0, tmp_2), 2);
- // b0_extra = (a0 != 0);
- const __m128i b0_extra = _mm_andnot_si128(_mm_cmpeq_epi16 (a0, zero), one);
- const __m128i b0_base = _mm_add_epi16(a0, a1);
+ const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
+ const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
+ const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
+ const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
+ const __m128i b0 = _mm_add_epi16(a0, a1);
const __m128i b1 = _mm_add_epi16(a3, a2);
const __m128i b2 = _mm_sub_epi16(a3, a2);
const __m128i b3 = _mm_sub_epi16(a0, a1);
- const __m128i b0 = _mm_add_epi16(b0_base, b0_extra);
// a00 a01 a02 a03 b00 b01 b02 b03
// a10 a11 a12 a13 b10 b11 b12 b13
// a20 a21 a22 a23 b20 b21 b22 b23
@@ -635,19 +736,6 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
B_b2 = _mm_sub_epi16(B_b2, sign_B_b2);
}
- // b = abs(b) + 3
- A_b0 = _mm_add_epi16(A_b0, three);
- A_b2 = _mm_add_epi16(A_b2, three);
- B_b0 = _mm_add_epi16(B_b0, three);
- B_b2 = _mm_add_epi16(B_b2, three);
-
- // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
- // b = (abs(b) + 3) >> 3
- A_b0 = _mm_srai_epi16(A_b0, 3);
- A_b2 = _mm_srai_epi16(A_b2, 3);
- B_b0 = _mm_srai_epi16(B_b0, 3);
- B_b2 = _mm_srai_epi16(B_b2, 3);
-
// weighted sums
A_b0 = _mm_madd_epi16(A_b0, w_0);
A_b2 = _mm_madd_epi16(A_b2, w_8);
@@ -666,7 +754,7 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int diff_sum = TTransformSSE2(a, b, w);
- return (abs(diff_sum) + 8) >> 4;
+ return abs(diff_sum) >> 5;
}
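The new shift tracks the rescaling of TTransformSSE2 above: the old code
pre-scaled the inputs by << 2 and then reduced each coefficient with
(abs(b) + 3) >> 3 (a net ~x1/2) before the weighted sum, while the new code
feeds abs() of the unscaled coefficients straight to the madd, leaving
diff_sum about twice as large. Roughly (annotation):

    // old: sum(w * ((abs(4 * t) + 3) >> 3)) ~= sum(w * abs(t)) / 2,
    //      then (abs(sum) + 8) >> 4
    // new: sum(w * abs(t)), then abs(sum) >> 5   -- same magnitude,
    //      with the +3/+8 rounding biases dropped as negligible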
static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
@@ -681,7 +769,6 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
return D;
}
-
//------------------------------------------------------------------------------
// Quantization
//
@@ -690,8 +777,7 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
int n, const VP8Matrix* const mtx) {
const __m128i max_coeff_2047 = _mm_set1_epi16(2047);
- const __m128i zero = _mm_set1_epi16(0);
- __m128i sign0, sign8;
+ const __m128i zero = _mm_setzero_si128();
__m128i coeff0, coeff8;
__m128i out0, out8;
__m128i packed_out;
@@ -713,8 +799,8 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]);
// sign(in) = in >> 15 (0x0000 if positive, 0xffff if negative)
- sign0 = _mm_srai_epi16(in0, 15);
- sign8 = _mm_srai_epi16(in8, 15);
+ const __m128i sign0 = _mm_srai_epi16(in0, 15);
+ const __m128i sign8 = _mm_srai_epi16(in8, 15);
// coeff = abs(in) = (in ^ sign) - sign
coeff0 = _mm_xor_si128(in0, sign0);
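The xor/subtract absolute value is the usual two's-complement identity; one
lane in scalar form (annotation):

    // sign = in >> 15 (arithmetic shift): 0 if in >= 0, -1 (0xffff) if in < 0
    // abs(in) == (in ^ sign) - sign
    // e.g. in = -5: sign = -1, in ^ -1 == 4 (bitwise NOT), 4 - (-1) == 5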
@@ -819,19 +905,28 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
}
}
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+// Entry point
+
extern void VP8EncDspInitSSE2(void);
+
void VP8EncDspInitSSE2(void) {
+#if defined(WEBP_USE_SSE2)
VP8CollectHistogram = CollectHistogramSSE2;
VP8EncQuantizeBlock = QuantizeBlockSSE2;
VP8ITransform = ITransformSSE2;
VP8FTransform = FTransformSSE2;
+ VP8SSE16x16 = SSE16x16SSE2;
+ VP8SSE16x8 = SSE16x8SSE2;
+ VP8SSE8x8 = SSE8x8SSE2;
VP8SSE4x4 = SSE4x4SSE2;
VP8TDisto4x4 = Disto4x4SSE2;
VP8TDisto16x16 = Disto16x16SSE2;
+#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-
-#endif // WEBP_USE_SSE2
diff --git a/src/dsp/lossless.c b/src/dsp/lossless.c
index 6d3094fd..080b3e63 100644
--- a/src/dsp/lossless.c
+++ b/src/dsp/lossless.c
@@ -11,27 +11,31 @@
// Jyrki Alakuijala (jyrki@google.com)
// Urvang Joshi (urvang@google.com)
-#define ANDROID_WEBP_RGB
+#include "./dsp.h"
+
+// Define the following if the target arch is known to have SSE2
+// #define WEBP_TARGET_HAS_SSE2
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
+#if defined(WEBP_TARGET_HAS_SSE2)
+#include <emmintrin.h>
+#endif
+
#include <math.h>
#include <stdlib.h>
#include "./lossless.h"
#include "../dec/vp8li.h"
-#include "../dsp/yuv.h"
-#include "../dsp/dsp.h"
-#include "../enc/histogram.h"
+#include "./yuv.h"
#define MAX_DIFF_COST (1e30f)
// lookup table for small values of log2(int)
#define APPROX_LOG_MAX 4096
#define LOG_2_RECIPROCAL 1.44269504088896338700465094007086
-#define LOG_LOOKUP_IDX_MAX 256
-static const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
+const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
0.0000000000000000f, 0.0000000000000000f,
1.0000000000000000f, 1.5849625007211560f,
2.0000000000000000f, 2.3219280948873621f,
@@ -162,16 +166,97 @@ static const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
7.9886846867721654f, 7.9943534368588577f
};
-float VP8LFastLog2(int v) {
- if (v < LOG_LOOKUP_IDX_MAX) {
- return kLog2Table[v];
- } else if (v < APPROX_LOG_MAX) {
+const float kSLog2Table[LOG_LOOKUP_IDX_MAX] = {
+ 0.00000000f, 0.00000000f, 2.00000000f, 4.75488750f,
+ 8.00000000f, 11.60964047f, 15.50977500f, 19.65148445f,
+ 24.00000000f, 28.52932501f, 33.21928095f, 38.05374781f,
+ 43.01955001f, 48.10571634f, 53.30296891f, 58.60335893f,
+ 64.00000000f, 69.48686830f, 75.05865003f, 80.71062276f,
+ 86.43856190f, 92.23866588f, 98.10749561f, 104.04192499f,
+ 110.03910002f, 116.09640474f, 122.21143267f, 128.38196256f,
+ 134.60593782f, 140.88144886f, 147.20671787f, 153.58008562f,
+ 160.00000000f, 166.46500594f, 172.97373660f, 179.52490559f,
+ 186.11730005f, 192.74977453f, 199.42124551f, 206.13068654f,
+ 212.87712380f, 219.65963219f, 226.47733176f, 233.32938445f,
+ 240.21499122f, 247.13338933f, 254.08384998f, 261.06567603f,
+ 268.07820003f, 275.12078236f, 282.19280949f, 289.29369244f,
+ 296.42286534f, 303.57978409f, 310.76392512f, 317.97478424f,
+ 325.21187564f, 332.47473081f, 339.76289772f, 347.07593991f,
+ 354.41343574f, 361.77497759f, 369.16017124f, 376.56863518f,
+ 384.00000000f, 391.45390785f, 398.93001188f, 406.42797576f,
+ 413.94747321f, 421.48818752f, 429.04981119f, 436.63204548f,
+ 444.23460010f, 451.85719280f, 459.49954906f, 467.16140179f,
+ 474.84249102f, 482.54256363f, 490.26137307f, 497.99867911f,
+ 505.75424759f, 513.52785023f, 521.31926438f, 529.12827280f,
+ 536.95466351f, 544.79822957f, 552.65876890f, 560.53608414f,
+ 568.42998244f, 576.34027536f, 584.26677867f, 592.20931226f,
+ 600.16769996f, 608.14176943f, 616.13135206f, 624.13628279f,
+ 632.15640007f, 640.19154569f, 648.24156472f, 656.30630539f,
+ 664.38561898f, 672.47935976f, 680.58738488f, 688.70955430f,
+ 696.84573069f, 704.99577935f, 713.15956818f, 721.33696754f,
+ 729.52785023f, 737.73209140f, 745.94956849f, 754.18016116f,
+ 762.42375127f, 770.68022275f, 778.94946161f, 787.23135586f,
+ 795.52579543f, 803.83267219f, 812.15187982f, 820.48331383f,
+ 828.82687147f, 837.18245171f, 845.54995518f, 853.92928416f,
+ 862.32034249f, 870.72303558f, 879.13727036f, 887.56295522f,
+ 896.00000000f, 904.44831595f, 912.90781569f, 921.37841320f,
+ 929.86002376f, 938.35256392f, 946.85595152f, 955.37010560f,
+ 963.89494641f, 972.43039537f, 980.97637504f, 989.53280911f,
+ 998.09962237f, 1006.67674069f, 1015.26409097f, 1023.86160116f,
+ 1032.46920021f, 1041.08681805f, 1049.71438560f, 1058.35183469f,
+ 1066.99909811f, 1075.65610955f, 1084.32280357f, 1092.99911564f,
+ 1101.68498204f, 1110.38033993f, 1119.08512727f, 1127.79928282f,
+ 1136.52274614f, 1145.25545758f, 1153.99735821f, 1162.74838989f,
+ 1171.50849518f, 1180.27761738f, 1189.05570047f, 1197.84268914f,
+ 1206.63852876f, 1215.44316535f, 1224.25654560f, 1233.07861684f,
+ 1241.90932703f, 1250.74862473f, 1259.59645914f, 1268.45278005f,
+ 1277.31753781f, 1286.19068338f, 1295.07216828f, 1303.96194457f,
+ 1312.85996488f, 1321.76618236f, 1330.68055071f, 1339.60302413f,
+ 1348.53355734f, 1357.47210556f, 1366.41862452f, 1375.37307041f,
+ 1384.33539991f, 1393.30557020f, 1402.28353887f, 1411.26926400f,
+ 1420.26270412f, 1429.26381818f, 1438.27256558f, 1447.28890615f,
+ 1456.31280014f, 1465.34420819f, 1474.38309138f, 1483.42941118f,
+ 1492.48312945f, 1501.54420843f, 1510.61261078f, 1519.68829949f,
+ 1528.77123795f, 1537.86138993f, 1546.95871952f, 1556.06319119f,
+ 1565.17476976f, 1574.29342040f, 1583.41910860f, 1592.55180020f,
+ 1601.69146137f, 1610.83805860f, 1619.99155871f, 1629.15192882f,
+ 1638.31913637f, 1647.49314911f, 1656.67393509f, 1665.86146266f,
+ 1675.05570047f, 1684.25661744f, 1693.46418280f, 1702.67836605f,
+ 1711.89913698f, 1721.12646563f, 1730.36032233f, 1739.60067768f,
+ 1748.84750254f, 1758.10076802f, 1767.36044551f, 1776.62650662f,
+ 1785.89892323f, 1795.17766747f, 1804.46271172f, 1813.75402857f,
+ 1823.05159087f, 1832.35537170f, 1841.66534438f, 1850.98148244f,
+ 1860.30375965f, 1869.63214999f, 1878.96662767f, 1888.30716711f,
+ 1897.65374295f, 1907.00633003f, 1916.36490342f, 1925.72943838f,
+ 1935.09991037f, 1944.47629506f, 1953.85856831f, 1963.24670620f,
+ 1972.64068498f, 1982.04048108f, 1991.44607117f, 2000.85743204f,
+ 2010.27454072f, 2019.69737440f, 2029.12591044f, 2038.56012640f
+};
+
+float VP8LFastSLog2Slow(int v) {
+ assert(v >= LOG_LOOKUP_IDX_MAX);
+ if (v < APPROX_LOG_MAX) {
int log_cnt = 0;
+ const float v_f = (float)v;
while (v >= LOG_LOOKUP_IDX_MAX) {
++log_cnt;
v = v >> 1;
}
- return kLog2Table[v] + (float)log_cnt;
+ return v_f * (kLog2Table[v] + log_cnt);
+ } else {
+ return (float)(LOG_2_RECIPROCAL * v * log((double)v));
+ }
+}
+
+float VP8LFastLog2Slow(int v) {
+ assert(v >= LOG_LOOKUP_IDX_MAX);
+ if (v < APPROX_LOG_MAX) {
+ int log_cnt = 0;
+ while (v >= LOG_LOOKUP_IDX_MAX) {
+ ++log_cnt;
+ v = v >> 1;
+ }
+ return kLog2Table[v] + log_cnt;
} else {
return (float)(LOG_2_RECIPROCAL * log((double)v));
}
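Both slow paths halve v into the table range and add the octaves back. A worked
value (annotation):

    // v = 1000 = 250 * 4: two halvings -> v = 250, log_cnt = 2, so
    // VP8LFastLog2Slow(1000) == kLog2Table[250] + 2 == log2(250) + 2
    //                        == log2(1000) exactly (1000 is 250 * 2^2).
    // VP8LFastSLog2Slow keeps the original value as v_f for the v * log2(v)
    // product, so the truncation in "v >> 1" only affects the log factor.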
@@ -200,6 +285,61 @@ static WEBP_INLINE uint32_t Average4(uint32_t a0, uint32_t a1,
return Average2(Average2(a0, a1), Average2(a2, a3));
}
+#if defined(WEBP_TARGET_HAS_SSE2)
+static WEBP_INLINE uint32_t ClampedAddSubtractFull(uint32_t c0, uint32_t c1,
+ uint32_t c2) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i C0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c0), zero);
+ const __m128i C1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c1), zero);
+ const __m128i C2 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
+ const __m128i V1 = _mm_add_epi16(C0, C1);
+ const __m128i V2 = _mm_sub_epi16(V1, C2);
+ const __m128i b = _mm_packus_epi16(V2, V2);
+ const uint32_t output = _mm_cvtsi128_si32(b);
+ return output;
+}
+
+static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1,
+ uint32_t c2) {
+ const uint32_t ave = Average2(c0, c1);
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i A0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(ave), zero);
+ const __m128i B0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
+ const __m128i A1 = _mm_sub_epi16(A0, B0);
+ const __m128i BgtA = _mm_cmpgt_epi16(B0, A0);
+ const __m128i A2 = _mm_sub_epi16(A1, BgtA);
+ const __m128i A3 = _mm_srai_epi16(A2, 1);
+ const __m128i A4 = _mm_add_epi16(A0, A3);
+ const __m128i A5 = _mm_packus_epi16(A4, A4);
+ const uint32_t output = _mm_cvtsi128_si32(A5);
+ return output;
+}
+
+static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
+ int pa_minus_pb;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i A0 = _mm_cvtsi32_si128(a);
+ const __m128i B0 = _mm_cvtsi32_si128(b);
+ const __m128i C0 = _mm_cvtsi32_si128(c);
+ const __m128i AC0 = _mm_subs_epu8(A0, C0);
+ const __m128i CA0 = _mm_subs_epu8(C0, A0);
+ const __m128i BC0 = _mm_subs_epu8(B0, C0);
+ const __m128i CB0 = _mm_subs_epu8(C0, B0);
+ const __m128i AC = _mm_or_si128(AC0, CA0);
+ const __m128i BC = _mm_or_si128(BC0, CB0);
+ const __m128i pa = _mm_unpacklo_epi8(AC, zero); // |a - c|
+ const __m128i pb = _mm_unpacklo_epi8(BC, zero); // |b - c|
+ const __m128i diff = _mm_sub_epi16(pb, pa);
+ {
+ int16_t out[8];
+ _mm_storeu_si128((__m128i*)out, diff);
+ pa_minus_pb = out[0] + out[1] + out[2] + out[3];
+ }
+ return (pa_minus_pb <= 0) ? a : b;
+}
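Note (annotation): _mm_cvtsi32_si128 zero-fills above the low 4 bytes, so after
the unpack only lanes 0..3 carry data, and out[0] + .. + out[3] is exactly the
per-channel sum of (|b - c| - |a - c|) -- the same quantity the scalar Sub3()
path below accumulates.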
+
+#else
+
static WEBP_INLINE uint32_t Clip255(uint32_t a) {
if (a < 256) {
return a;
@@ -241,9 +381,9 @@ static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1,
}
static WEBP_INLINE int Sub3(int a, int b, int c) {
- const int pa = b - c;
- const int pb = a - c;
- return abs(pa) - abs(pb);
+ const int pb = b - c;
+ const int pa = a - c;
+ return abs(pb) - abs(pa);
}
static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
@@ -252,9 +392,9 @@ static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
Sub3((a >> 16) & 0xff, (b >> 16) & 0xff, (c >> 16) & 0xff) +
Sub3((a >> 8) & 0xff, (b >> 8) & 0xff, (c >> 8) & 0xff) +
Sub3((a ) & 0xff, (b ) & 0xff, (c ) & 0xff);
-
return (pa_minus_pb <= 0) ? a : b;
}
+#endif
//------------------------------------------------------------------------------
// Predictors
@@ -342,35 +482,36 @@ static float PredictionCostSpatial(const int* counts,
return (float)(-0.1 * bits);
}
-// Compute the Shanon's entropy: Sum(p*log2(p))
-static float ShannonEntropy(const int* const array, int n) {
+// Compute the combined Shannon entropy for the distributions {X} and {X+Y}
+static float CombinedShannonEntropy(const int* const X,
+ const int* const Y, int n) {
int i;
- float retval = 0.f;
- int sum = 0;
+ double retval = 0.;
+ int sumX = 0, sumXY = 0;
for (i = 0; i < n; ++i) {
- if (array[i] != 0) {
- sum += array[i];
- retval -= VP8LFastSLog2(array[i]);
+ const int x = X[i];
+ const int xy = X[i] + Y[i];
+ if (x != 0) {
+ sumX += x;
+ retval -= VP8LFastSLog2(x);
+ }
+ if (xy != 0) {
+ sumXY += xy;
+ retval -= VP8LFastSLog2(xy);
}
}
- retval += VP8LFastSLog2(sum);
- return retval;
+ retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
+ return (float)retval;
}
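With S(v) = v * log2(v) (what VP8LFastSLog2 computes), this is the standard
entropy identity applied to both histograms in one pass:

    sum_i n_i * log2(N / n_i)  ==  S(N) - sum_i S(n_i),   N = sum_i n_i

so retval is N_X * H(X/N_X) + N_XY * H((X+Y)/N_XY), i.e. the estimated coded
size in bits of {X} plus that of {X+Y}, without materializing the combo[]
array the old code built.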
static float PredictionCostSpatialHistogram(int accumulated[4][256],
int tile[4][256]) {
int i;
- int k;
- int combo[256];
double retval = 0;
for (i = 0; i < 4; ++i) {
- const double exp_val = 0.94;
- retval += PredictionCostSpatial(&tile[i][0], 1, exp_val);
- retval += ShannonEntropy(&tile[i][0], 256);
- for (k = 0; k < 256; ++k) {
- combo[k] = accumulated[i][k] + tile[i][k];
- }
- retval += ShannonEntropy(&combo[0], 256);
+ const double kExpValue = 0.94;
+ retval += PredictionCostSpatial(tile[i], 1, kExpValue);
+ retval += CombinedShannonEntropy(tile[i], accumulated[i], 256);
}
return (float)retval;
}
@@ -574,8 +715,21 @@ static void PredictorInverseTransform(const VP8LTransform* const transform,
}
void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
- int i;
- for (i = 0; i < num_pixs; ++i) {
+ int i = 0;
+#if defined(WEBP_TARGET_HAS_SSE2)
+ const __m128i mask = _mm_set1_epi32(0x0000ff00);
+ for (; i + 4 < num_pixs; i += 4) {
+ const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]);
+ const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
+ const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
+ const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
+ const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
+ const __m128i out = _mm_sub_epi8(in, in_0g0g);
+ _mm_storeu_si128((__m128i*)&argb_data[i], out);
+ }
+ // fallthrough and finish off with plain-C
+#endif
+ for (; i < num_pixs; ++i) {
const uint32_t argb = argb_data[i];
const uint32_t green = (argb >> 8) & 0xff;
const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff;
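The vector loop builds a 0g0g pattern per pixel so one _mm_sub_epi8 performs
the same mod-256 per-channel subtraction as the scalar tail. One worked pixel
(annotation):

    // argb    = 0x01805030  (a=01 r=80 g=50 b=30)
    // in_00g0 = 0x00005000, in_0g00 = 0x00500000, in_000g = 0x00000050
    // in_0g0g = 0x00500050
    // out     = 0x01805030 - 0x00500050 (per byte) = 0x013050e0
    //           r: 0x80 - 0x50 = 0x30;  b: 0x30 - 0x50 = 0xe0 (mod 256)
    // AddGreenToBlueAndRed below inverts this with _mm_add_epi8.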
@@ -590,9 +744,21 @@ static void AddGreenToBlueAndRed(const VP8LTransform* const transform,
int y_start, int y_end, uint32_t* data) {
const int width = transform->xsize_;
const uint32_t* const data_end = data + (y_end - y_start) * width;
+#if defined(WEBP_TARGET_HAS_SSE2)
+ const __m128i mask = _mm_set1_epi32(0x0000ff00);
+ for (; data + 4 < data_end; data += 4) {
+ const __m128i in = _mm_loadu_si128((__m128i*)data);
+ const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
+ const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
+ const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
+ const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
+ const __m128i out = _mm_add_epi8(in, in_0g0g);
+ _mm_storeu_si128((__m128i*)data, out);
+ }
+ // fallthrough and finish off with plain-C
+#endif
while (data < data_end) {
const uint32_t argb = *data;
- // "* 0001001u" is equivalent to "(green << 16) + green)"
const uint32_t green = ((argb >> 8) & 0xff);
uint32_t red_blue = (argb & 0x00ff00ffu);
red_blue += (green << 16) | green;
@@ -657,6 +823,25 @@ static WEBP_INLINE uint32_t TransformColor(const Multipliers* const m,
return (argb & 0xff00ff00u) | (new_red << 16) | (new_blue);
}
+static WEBP_INLINE uint8_t TransformColorRed(uint8_t green_to_red,
+ uint32_t argb) {
+ const uint32_t green = argb >> 8;
+ uint32_t new_red = argb >> 16;
+ new_red -= ColorTransformDelta(green_to_red, green);
+ return (new_red & 0xff);
+}
+
+static WEBP_INLINE uint8_t TransformColorBlue(uint8_t green_to_blue,
+ uint8_t red_to_blue,
+ uint32_t argb) {
+ const uint32_t green = argb >> 8;
+ const uint32_t red = argb >> 16;
+ uint8_t new_blue = argb;
+ new_blue -= ColorTransformDelta(green_to_blue, green);
+ new_blue -= ColorTransformDelta(red_to_blue, red);
+ return (new_blue & 0xff);
+}
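These helpers let the tile search histogram just the transformed channel
instead of building a Multipliers struct per candidate. They rely on
ColorTransformDelta() defined earlier in this file -- presumably the usual
signed fixed-point product, something like this sketch (assumed shape, shown
only to make the two helpers above readable):

    static WEBP_INLINE int32_t ColorTransformDelta(int8_t color_pred,
                                                   int8_t color) {
      return ((int32_t)color_pred * color) >> 5;  // assumed, not from the patch
    }

so TransformColorRed(g2r, argb) matches the red byte TransformColor() would
produce with only green_to_red_ set.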
+
static WEBP_INLINE int SkipRepeatedPixels(const uint32_t* const argb,
int ix, int xsize) {
const uint32_t v = argb[ix];
@@ -677,14 +862,10 @@ static WEBP_INLINE int SkipRepeatedPixels(const uint32_t* const argb,
static float PredictionCostCrossColor(const int accumulated[256],
const int counts[256]) {
// Favor low entropy, locally and globally.
- int i;
- int combo[256];
- for (i = 0; i < 256; ++i) {
- combo[i] = accumulated[i] + counts[i];
- }
- return ShannonEntropy(combo, 256) +
- ShannonEntropy(counts, 256) +
- PredictionCostSpatial(counts, 3, 2.4); // Favor small absolute values.
+ // Favor small absolute values for PredictionCostSpatial
+ static const double kExpValue = 2.4;
+ return CombinedShannonEntropy(counts, accumulated, 256) +
+ PredictionCostSpatial(counts, 3, kExpValue);
}
static Multipliers GetBestColorTransformForTile(
@@ -714,85 +895,75 @@ static Multipliers GetBestColorTransformForTile(
if (all_y_max > ysize) {
all_y_max = ysize;
}
+
for (green_to_red = -64; green_to_red <= 64; green_to_red += halfstep) {
int histo[256] = { 0 };
int all_y;
- Multipliers tx;
- MultipliersClear(&tx);
- tx.green_to_red_ = green_to_red & 0xff;
for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) {
- uint32_t predict;
int ix = all_y * xsize + tile_x_offset;
int all_x;
for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
if (SkipRepeatedPixels(argb, ix, xsize)) {
continue;
}
- predict = TransformColor(&tx, argb[ix], 0);
- ++histo[(predict >> 16) & 0xff]; // red.
+ ++histo[TransformColorRed(green_to_red, argb[ix])]; // red.
}
}
cur_diff = PredictionCostCrossColor(&accumulated_red_histo[0], &histo[0]);
- if (tx.green_to_red_ == prevX.green_to_red_) {
+ if ((uint8_t)green_to_red == prevX.green_to_red_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (tx.green_to_red_ == prevY.green_to_red_) {
+ if ((uint8_t)green_to_red == prevY.green_to_red_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (tx.green_to_red_ == 0) {
+ if (green_to_red == 0) {
cur_diff -= 3;
}
if (cur_diff < best_diff) {
best_diff = cur_diff;
- best_tx = tx;
+ best_tx.green_to_red_ = green_to_red;
}
}
best_diff = MAX_DIFF_COST;
- green_to_red = best_tx.green_to_red_;
for (green_to_blue = -32; green_to_blue <= 32; green_to_blue += step) {
for (red_to_blue = -32; red_to_blue <= 32; red_to_blue += step) {
int all_y;
int histo[256] = { 0 };
- Multipliers tx;
- tx.green_to_red_ = green_to_red;
- tx.green_to_blue_ = green_to_blue;
- tx.red_to_blue_ = red_to_blue;
for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) {
- uint32_t predict;
int all_x;
int ix = all_y * xsize + tile_x_offset;
for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
if (SkipRepeatedPixels(argb, ix, xsize)) {
continue;
}
- predict = TransformColor(&tx, argb[ix], 0);
- ++histo[predict & 0xff]; // blue.
+ ++histo[TransformColorBlue(green_to_blue, red_to_blue, argb[ix])];
}
}
cur_diff =
- PredictionCostCrossColor(&accumulated_blue_histo[0], &histo[0]);
- if (tx.green_to_blue_ == prevX.green_to_blue_) {
+ PredictionCostCrossColor(&accumulated_blue_histo[0], &histo[0]);
+ if ((uint8_t)green_to_blue == prevX.green_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (tx.green_to_blue_ == prevY.green_to_blue_) {
+ if ((uint8_t)green_to_blue == prevY.green_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (tx.red_to_blue_ == prevX.red_to_blue_) {
+ if ((uint8_t)red_to_blue == prevX.red_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (tx.red_to_blue_ == prevY.red_to_blue_) {
+ if ((uint8_t)red_to_blue == prevY.red_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (tx.green_to_blue_ == 0) {
+ if (green_to_blue == 0) {
cur_diff -= 3;
}
- if (tx.red_to_blue_ == 0) {
+ if (red_to_blue == 0) {
cur_diff -= 3;
}
if (cur_diff < best_diff) {
best_diff = cur_diff;
- best_tx = tx;
+ best_tx.green_to_blue_ = green_to_blue;
+ best_tx.red_to_blue_ = red_to_blue;
}
}
}
@@ -1036,12 +1207,14 @@ static void ConvertBGRAToRGBA4444(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
-#ifdef ANDROID_WEBP_RGB
- *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
- *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
+ const uint8_t rg = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
+ const uint8_t ba = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
+#ifdef WEBP_SWAP_16BIT_CSP
+ *dst++ = ba;
+ *dst++ = rg;
#else
- *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
- *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
+ *dst++ = rg;
+ *dst++ = ba;
#endif
}
}
@@ -1051,12 +1224,14 @@ static void ConvertBGRAToRGB565(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
-#ifdef ANDROID_WEBP_RGB
- *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
- *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
+ const uint8_t rg = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
+ const uint8_t gb = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
+#ifdef WEBP_SWAP_16BIT_CSP
+ *dst++ = gb;
+ *dst++ = rg;
#else
- *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
- *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
+ *dst++ = rg;
+ *dst++ = gb;
#endif
}
}
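Bit layout of the two RGB565 bytes computed above (annotation):

    // rg = R7 R6 R5 R4 R3 G7 G6 G5   ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7)
    // gb = G4 G3 G2 B7 B6 B5 B4 B3   ((argb >>  5) & 0xe0) | ((argb >>  3) & 0x1f)
    // e.g. argb = 0xfff88408  ->  rg = 0xfc, gb = 0x21
    // WEBP_SWAP_16BIT_CSP only flips the output byte order, for targets that
    // read the buffer as little-endian uint16_t; RGBA4444 above gets the same
    // treatment with its rg/ba pair.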
@@ -1078,20 +1253,27 @@ static void CopyOrSwap(const uint32_t* src, int num_pixels, uint8_t* dst,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
uint32_t argb = *src++;
+
+#if !defined(WEBP_REFERENCE_IMPLEMENTATION)
#if !defined(__BIG_ENDIAN__) && (defined(__i386__) || defined(__x86_64__))
__asm__ volatile("bswap %0" : "=r"(argb) : "0"(argb));
*(uint32_t*)dst = argb;
- dst += sizeof(argb);
#elif !defined(__BIG_ENDIAN__) && defined(_MSC_VER)
argb = _byteswap_ulong(argb);
*(uint32_t*)dst = argb;
- dst += sizeof(argb);
#else
- *dst++ = (argb >> 24) & 0xff;
- *dst++ = (argb >> 16) & 0xff;
- *dst++ = (argb >> 8) & 0xff;
- *dst++ = (argb >> 0) & 0xff;
+ dst[0] = (argb >> 24) & 0xff;
+ dst[1] = (argb >> 16) & 0xff;
+ dst[2] = (argb >> 8) & 0xff;
+ dst[3] = (argb >> 0) & 0xff;
+#endif
+#else // WEBP_REFERENCE_IMPLEMENTATION
+ dst[0] = (argb >> 24) & 0xff;
+ dst[1] = (argb >> 16) & 0xff;
+ dst[2] = (argb >> 8) & 0xff;
+ dst[3] = (argb >> 0) & 0xff;
#endif
+ dst += sizeof(argb);
}
} else {
memcpy(dst, src, num_pixels * sizeof(*src));
diff --git a/src/dsp/lossless.h b/src/dsp/lossless.h
index 22a91cbf..f14e2ea7 100644
--- a/src/dsp/lossless.h
+++ b/src/dsp/lossless.h
@@ -59,10 +59,20 @@ static WEBP_INLINE uint32_t VP8LSubSampleSize(uint32_t size,
return (size + (1 << sampling_bits) - 1) >> sampling_bits;
}
-// Faster logarithm for integers, with the property of log2(0) == 0.
-float VP8LFastLog2(int v);
+// Faster logarithm for integers. Small values use a look-up table.
+#define LOG_LOOKUP_IDX_MAX 256
+extern const float kLog2Table[LOG_LOOKUP_IDX_MAX];
+extern const float kSLog2Table[LOG_LOOKUP_IDX_MAX];
+extern float VP8LFastLog2Slow(int v);
+extern float VP8LFastSLog2Slow(int v);
+static WEBP_INLINE float VP8LFastLog2(int v) {
+ return (v < LOG_LOOKUP_IDX_MAX) ? kLog2Table[v] : VP8LFastLog2Slow(v);
+}
// Fast calculation of v * log2(v) for integer input.
-static WEBP_INLINE float VP8LFastSLog2(int v) { return VP8LFastLog2(v) * v; }
+static WEBP_INLINE float VP8LFastSLog2(int v) {
+ return (v < LOG_LOOKUP_IDX_MAX) ? kSLog2Table[v] : VP8LFastSLog2Slow(v);
+}
+
// In-place difference of each component with mod 256.
static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
diff --git a/src/dsp/upsampling.c b/src/dsp/upsampling.c
index 4855eb14..91d939cd 100644
--- a/src/dsp/upsampling.c
+++ b/src/dsp/upsampling.c
@@ -328,6 +328,11 @@ void WebPInitUpsamplers(void) {
WebPInitUpsamplersSSE2();
}
#endif
+#if defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ WebPInitUpsamplersNEON();
+ }
+#endif
}
#endif // FANCY_UPSAMPLING
}
@@ -348,6 +353,11 @@ void WebPInitPremultiply(void) {
WebPInitPremultiplySSE2();
}
#endif
+#if defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ WebPInitPremultiplyNEON();
+ }
+#endif
}
#endif // FANCY_UPSAMPLING
}
diff --git a/src/dsp/upsampling_neon.c b/src/dsp/upsampling_neon.c
new file mode 100644
index 00000000..00e2f892
--- /dev/null
+++ b/src/dsp/upsampling_neon.c
@@ -0,0 +1,292 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// NEON version of YUV to RGB upsampling functions.
+//
+// Author: mans@mansr.com (Mans Rullgard)
+// Based on SSE code by: somnath@google.com (Somnath Banerjee)
+
+#include "./dsp.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#if defined(WEBP_USE_NEON)
+
+#include <assert.h>
+#include <arm_neon.h>
+#include <string.h>
+#include "./yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
+#define UPSAMPLE_16PIXELS(r1, r2, out) { \
+ uint8x8_t a = vld1_u8(r1); \
+ uint8x8_t b = vld1_u8(r1 + 1); \
+ uint8x8_t c = vld1_u8(r2); \
+ uint8x8_t d = vld1_u8(r2 + 1); \
+ \
+ uint16x8_t al = vshll_n_u8(a, 1); \
+ uint16x8_t bl = vshll_n_u8(b, 1); \
+ uint16x8_t cl = vshll_n_u8(c, 1); \
+ uint16x8_t dl = vshll_n_u8(d, 1); \
+ \
+ uint8x8_t diag1, diag2; \
+ uint16x8_t sl; \
+ \
+ /* a + b + c + d */ \
+ sl = vaddl_u8(a, b); \
+ sl = vaddw_u8(sl, c); \
+ sl = vaddw_u8(sl, d); \
+ \
+ al = vaddq_u16(sl, al); /* 3a + b + c + d */ \
+ bl = vaddq_u16(sl, bl); /* a + 3b + c + d */ \
+ \
+ al = vaddq_u16(al, dl); /* 3a + b + c + 3d */ \
+ bl = vaddq_u16(bl, cl); /* a + 3b + 3c + d */ \
+ \
+ diag2 = vshrn_n_u16(al, 3); \
+ diag1 = vshrn_n_u16(bl, 3); \
+ \
+ a = vrhadd_u8(a, diag1); \
+ b = vrhadd_u8(b, diag2); \
+ c = vrhadd_u8(c, diag2); \
+ d = vrhadd_u8(d, diag1); \
+ \
+ { \
+ const uint8x8x2_t a_b = {{ a, b }}; \
+ const uint8x8x2_t c_d = {{ c, d }}; \
+ vst2_u8(out, a_b); \
+ vst2_u8(out + 32, c_d); \
+ } \
+}
+
+// Turn the macro into a function to reduce code size where speed is non-critical
+static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
+ uint8_t *out) {
+ UPSAMPLE_16PIXELS(r1, r2, out);
+}
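Working out the weights (annotation): with sl = a + b + c + d, the sums above
give diag1 = (a + 3b + 3c + d) >> 3 and diag2 = (3a + b + c + 3d) >> 3, and
vrhadd_u8(x, y) rounds as (x + y + 1) >> 1, so for instance:

    // a' = vrhadd(a, diag1) ~= (8a + (a + 3b + 3c + d) + 8) / 16
    //                        = (9a + 3b + 3c + d + 8) / 16

the same fancy-upsampling kernel as the SSE2 version, up to the truncation in
the intermediate >> 3. The two vst2_u8() calls then interleave a'/b' into the
top output row and c'/d' into the bottom one.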
+
+#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
+ uint8_t r1[9], r2[9]; \
+ memcpy(r1, (tb), (num_pixels)); \
+ memcpy(r2, (bb), (num_pixels)); \
+ /* replicate last byte */ \
+ memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
+ memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
+ Upsample16Pixels(r1, r2, out); \
+}
+
+#define CY 76283
+#define CVR 89858
+#define CUG 22014
+#define CVG 45773
+#define CUB 113618
+
+static const int16_t coef[4] = { CVR / 4, CUG, CVG / 2, CUB / 4 };
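The divided constants are exact, not approximations: vqdmlal doubles its
product, and each accumulator in CONVERT8 below is seeded with the input
itself (x1 or x2), which restores the bits lost to the integer division so
every coefficient fits in int16. For the V-to-R term (annotation):

    // coef[0] = CVR / 4 = 89858 / 4 = 22464   (4 * 22464 = 89856)
    // vrl = vqdmlal(2*vv, 2*vv, 22464) = 2*vv + 2 * (2*vv * 22464)
    //     = vv * (2 + 4 * 22464) = vv * 89858 = vv * CVR, exactly
    // likewise CUB/4 (2 + 4 * 28404 = 113618) and CVG/2 (1 + 2 * 22886 = 45773);
    // CUG = 22014 already fits in int16 and is applied undoubled via vmlal.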
+
+#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
+ int i; \
+ for (i = 0; i < N; i += 8) { \
+ int off = ((cur_x) + i) * XSTEP; \
+ uint8x8_t y = vld1_u8(src_y + (cur_x) + i); \
+ uint8x8_t u = vld1_u8((src_uv) + i); \
+ uint8x8_t v = vld1_u8((src_uv) + i + 16); \
+ int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
+ int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
+ int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
+ \
+ int16x8_t ud = vshlq_n_s16(uu, 1); \
+ int16x8_t vd = vshlq_n_s16(vv, 1); \
+ \
+ int32x4_t vrl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(vv), 1), \
+ vget_low_s16(vd), cf16, 0); \
+ int32x4_t vrh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(vv), 1), \
+ vget_high_s16(vd), cf16, 0); \
+ int16x8_t vr = vcombine_s16(vrshrn_n_s32(vrl, 16), \
+ vrshrn_n_s32(vrh, 16)); \
+ \
+ int32x4_t vl = vmovl_s16(vget_low_s16(vv)); \
+ int32x4_t vh = vmovl_s16(vget_high_s16(vv)); \
+ int32x4_t ugl = vmlal_lane_s16(vl, vget_low_s16(uu), cf16, 1); \
+ int32x4_t ugh = vmlal_lane_s16(vh, vget_high_s16(uu), cf16, 1); \
+ int32x4_t gcl = vqdmlal_lane_s16(ugl, vget_low_s16(vv), cf16, 2); \
+ int32x4_t gch = vqdmlal_lane_s16(ugh, vget_high_s16(vv), cf16, 2); \
+ int16x8_t gc = vcombine_s16(vrshrn_n_s32(gcl, 16), \
+ vrshrn_n_s32(gch, 16)); \
+ \
+ int32x4_t ubl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(uu), 1), \
+ vget_low_s16(ud), cf16, 3); \
+ int32x4_t ubh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(uu), 1), \
+ vget_high_s16(ud), cf16, 3); \
+ int16x8_t ub = vcombine_s16(vrshrn_n_s32(ubl, 16), \
+ vrshrn_n_s32(ubh, 16)); \
+ \
+ int32x4_t rl = vaddl_s16(vget_low_s16(yy), vget_low_s16(vr)); \
+ int32x4_t rh = vaddl_s16(vget_high_s16(yy), vget_high_s16(vr)); \
+ int32x4_t gl = vsubl_s16(vget_low_s16(yy), vget_low_s16(gc)); \
+ int32x4_t gh = vsubl_s16(vget_high_s16(yy), vget_high_s16(gc)); \
+ int32x4_t bl = vaddl_s16(vget_low_s16(yy), vget_low_s16(ub)); \
+ int32x4_t bh = vaddl_s16(vget_high_s16(yy), vget_high_s16(ub)); \
+ \
+ rl = vmulq_lane_s32(rl, cf32, 0); \
+ rh = vmulq_lane_s32(rh, cf32, 0); \
+ gl = vmulq_lane_s32(gl, cf32, 0); \
+ gh = vmulq_lane_s32(gh, cf32, 0); \
+ bl = vmulq_lane_s32(bl, cf32, 0); \
+ bh = vmulq_lane_s32(bh, cf32, 0); \
+ \
+ y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, 16), \
+ vrshrn_n_s32(rh, 16))); \
+ u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, 16), \
+ vrshrn_n_s32(gh, 16))); \
+ v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(bl, 16), \
+ vrshrn_n_s32(bh, 16))); \
+ STR_ ## FMT(out + off, y, u, v); \
+ } \
+}
+
+#define v255 vmov_n_u8(255)
+
+#define STR_Rgb(out, r, g, b) do { \
+ const uint8x8x3_t r_g_b = {{ r, g, b }}; \
+ vst3_u8(out, r_g_b); \
+} while (0)
+
+#define STR_Bgr(out, r, g, b) do { \
+ const uint8x8x3_t b_g_r = {{ b, g, r }}; \
+ vst3_u8(out, b_g_r); \
+} while (0)
+
+#define STR_Rgba(out, r, g, b) do { \
+ const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }}; \
+ vst4_u8(out, r_g_b_v255); \
+} while (0)
+
+#define STR_Bgra(out, r, g, b) do { \
+ const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }}; \
+ vst4_u8(out, b_g_r_v255); \
+} while (0)
+
+#define CONVERT1(FMT, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
+ int i; \
+ for (i = 0; i < N; i++) { \
+ int off = ((cur_x) + i) * XSTEP; \
+ int y = src_y[(cur_x) + i]; \
+ int u = (src_uv)[i]; \
+ int v = (src_uv)[i + 16]; \
+ VP8YuvTo ## FMT(y, u, v, rgb + off); \
+ } \
+}
+
+#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
+ top_dst, bottom_dst, cur_x, len) { \
+ if (top_y) { \
+ CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
+ } \
+ if (bottom_y) { \
+ CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x) \
+ } \
+}
+
+#define CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, uv, \
+ top_dst, bottom_dst, cur_x, len) { \
+ if (top_y) { \
+ CONVERT1(FMT, XSTEP, len, top_y, uv, top_dst, cur_x); \
+ } \
+ if (bottom_y) { \
+ CONVERT1(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
+ } \
+}
+
+#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
+static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
+ const uint8_t *top_u, const uint8_t *top_v, \
+ const uint8_t *cur_u, const uint8_t *cur_v, \
+ uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
+ int block; \
+ /* 16 byte aligned array to cache reconstructed u and v */ \
+ uint8_t uv_buf[2 * 32 + 15]; \
+ uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
+ const int uv_len = (len + 1) >> 1; \
+  /* 9 pixels must be readable for each block */ \
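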
+ const int num_blocks = (uv_len - 1) >> 3; \
+ const int leftover = uv_len - num_blocks * 8; \
+ const int last_pos = 1 + 16 * num_blocks; \
+ \
+ const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
+ const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
+ \
+ const int16x4_t cf16 = vld1_s16(coef); \
+ const int32x2_t cf32 = vmov_n_s32(CY); \
+ const uint8x8_t u16 = vmov_n_u8(16); \
+ const uint8x8_t u128 = vmov_n_u8(128); \
+ \
+ /* Treat the first pixel in regular way */ \
+ if (top_y) { \
+ const int u0 = (top_u[0] + u_diag) >> 1; \
+ const int v0 = (top_v[0] + v_diag) >> 1; \
+ VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
+ } \
+ if (bottom_y) { \
+ const int u0 = (cur_u[0] + u_diag) >> 1; \
+ const int v0 = (cur_v[0] + v_diag) >> 1; \
+ VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
+ } \
+ \
+ for (block = 0; block < num_blocks; ++block) { \
+ UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
+ UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
+ CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
+ top_dst, bottom_dst, 16 * block + 1, 16); \
+ top_u += 8; \
+ cur_u += 8; \
+ top_v += 8; \
+ cur_v += 8; \
+ } \
+ \
+ UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
+ UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
+ CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, r_uv, \
+ top_dst, bottom_dst, last_pos, len - last_pos); \
+}
+
+// NEON variants of the fancy upsampler.
+NEON_UPSAMPLE_FUNC(UpsampleRgbLinePairNEON, Rgb, 3)
+NEON_UPSAMPLE_FUNC(UpsampleBgrLinePairNEON, Bgr, 3)
+NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePairNEON, Rgba, 4)
+NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4)
+
+#endif // FANCY_UPSAMPLING
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+void WebPInitUpsamplersNEON(void) {
+#if defined(WEBP_USE_NEON)
+ WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairNEON;
+ WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairNEON;
+ WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairNEON;
+ WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairNEON;
+#endif // WEBP_USE_NEON
+}
+
+void WebPInitPremultiplyNEON(void) {
+#if defined(WEBP_USE_NEON)
+ WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairNEON;
+ WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairNEON;
+#endif // WEBP_USE_NEON
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/dsp/upsampling_sse2.c b/src/dsp/upsampling_sse2.c
index 8cb275a0..ba075d11 100644
--- a/src/dsp/upsampling_sse2.c
+++ b/src/dsp/upsampling_sse2.c
@@ -11,6 +11,10 @@
#include "./dsp.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#if defined(WEBP_USE_SSE2)
#include <assert.h>
@@ -18,10 +22,6 @@
#include <string.h>
#include "./yuv.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
#ifdef FANCY_UPSAMPLING
// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
@@ -51,12 +51,12 @@ extern "C" {
// pack and store two alternating pixel rows
#define PACK_AND_STORE(a, b, da, db, out) do { \
- const __m128i ta = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
- const __m128i tb = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
- const __m128i t1 = _mm_unpacklo_epi8(ta, tb); \
- const __m128i t2 = _mm_unpackhi_epi8(ta, tb); \
- _mm_store_si128(((__m128i*)(out)) + 0, t1); \
- _mm_store_si128(((__m128i*)(out)) + 1, t2); \
+ const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
+ const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
+ const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
+ const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
+ _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
+ _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
} while (0)
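Note (annotation): _mm_avg_epu8(x, y) computes (x + y + 1) >> 1 per unsigned
byte, which is what turns one rounding average against the precomputed
diagonal terms into the (9a + 3b + 3c + d + 8) / 16 kernel annotated above --
the same weights the NEON upsampler derives.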
// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
@@ -128,7 +128,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const uint8_t* top_u, const uint8_t* top_v, \
const uint8_t* cur_u, const uint8_t* cur_v, \
uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
- int b; \
+ int block; \
/* 16 byte aligned array to cache reconstructed u and v */ \
uint8_t uv_buf[4 * 32 + 15]; \
uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
@@ -154,11 +154,11 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
FUNC(bottom_y[0], u0, v0, bottom_dst); \
} \
\
- for (b = 0; b < num_blocks; ++b) { \
+ for (block = 0; block < num_blocks; ++block) { \
UPSAMPLE_32PIXELS(top_u, cur_u, r_uv + 0 * 32); \
UPSAMPLE_32PIXELS(top_v, cur_v, r_uv + 1 * 32); \
CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \
- 32 * b + 1, 32) \
+ 32 * block + 1, 32) \
top_u += 16; \
cur_u += 16; \
top_v += 16; \
@@ -184,26 +184,32 @@ SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4)
#undef CONVERT2RGB
#undef SSE2_UPSAMPLE_FUNC
+#endif // FANCY_UPSAMPLING
+
+#endif // WEBP_USE_SSE2
+
//------------------------------------------------------------------------------
extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
void WebPInitUpsamplersSSE2(void) {
+#if defined(WEBP_USE_SSE2)
WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairSSE2;
WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairSSE2;
WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairSSE2;
+#endif // WEBP_USE_SSE2
}
void WebPInitPremultiplySSE2(void) {
+#if defined(WEBP_USE_SSE2)
WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairSSE2;
+#endif // WEBP_USE_SSE2
}
-#endif // FANCY_UPSAMPLING
-
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-#endif // WEBP_USE_SSE2
+
diff --git a/src/dsp/yuv.c b/src/dsp/yuv.c
index 7f05f9a3..38895281 100644
--- a/src/dsp/yuv.c
+++ b/src/dsp/yuv.c
@@ -33,6 +33,7 @@ void VP8YUVInit(void) {
if (done) {
return;
}
+#ifndef USE_YUVj
for (i = 0; i < 256; ++i) {
VP8kVToR[i] = (89858 * (i - 128) + YUV_HALF) >> YUV_FIX;
VP8kUToG[i] = -22014 * (i - 128) + YUV_HALF;
@@ -44,6 +45,20 @@ void VP8YUVInit(void) {
VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
}
+#else
+ for (i = 0; i < 256; ++i) {
+ VP8kVToR[i] = (91881 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ VP8kUToG[i] = -22554 * (i - 128) + YUV_HALF;
+ VP8kVToG[i] = -46802 * (i - 128);
+ VP8kUToB[i] = (116130 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ }
+ for (i = YUV_RANGE_MIN; i < YUV_RANGE_MAX; ++i) {
+ const int k = i;
+ VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
+ VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
+ }
+#endif
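The alternate table is the full-range JPEG inverse transform in the same
16-bit fixed point (annotation):

    // 91881 / 65536 = 1.402     116130 / 65536 = 1.772
    // 22554 / 65536 = 0.3441     46802 / 65536 = 0.7141
    // i.e. R = Y + 1.402 (V-128), B = Y + 1.772 (U-128),
    //      G = Y - 0.3441 (U-128) - 0.7141 (V-128), with no [16,235] scaling.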
+
done = 1;
}
diff --git a/src/dsp/yuv.h b/src/dsp/yuv.h
index ee3587e3..add167ea 100644
--- a/src/dsp/yuv.h
+++ b/src/dsp/yuv.h
@@ -7,6 +7,25 @@
//
// inline YUV<->RGB conversion function
//
+// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
+// More information at: http://en.wikipedia.org/wiki/YCbCr
+// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
+// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
+// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
+// We use 16bit fixed point operations for RGB->YUV conversion.
+//
+// For the Y'CbCr to RGB conversion, the BT.601 specification reads:
+// R = 1.164 * (Y-16) + 1.596 * (V-128)
+// G = 1.164 * (Y-16) - 0.813 * (V-128) - 0.391 * (U-128)
+// B = 1.164 * (Y-16) + 2.018 * (U-128)
+// where Y is in the [16,235] range, and U/V in the [16,240] range.
+// But the common term 1.164 * (Y-16) can be handled as an offset in the
+// VP8kClip[] table. So the formulae should be read as:
+// R = 1.164 * [Y + 1.371 * (V-128) ] - 18.624
+// G = 1.164 * [Y - 0.698 * (V-128) - 0.336 * (U-128)] - 18.624
+// B = 1.164 * [Y + 1.733 * (U-128)] - 18.624
+// once factorized. Here too, 16bit fixed precision is used.
+//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DSP_YUV_H_
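The float coefficients quoted above correspond to the 16-bit fixed-point
constants used further down in this header (assuming YUV_FIX == 16, i.e. a
65536 scale):

    // 16839 / 65536 = 0.2569   33059 / 65536 = 0.5044   6420 / 65536 = 0.0979
    // 89858 / 65536 = 1.3711   (the 1.371 * (V-128) term in the R formula)
    // 18.624 = 1.164 * 16      (the luma offset folded into VP8kClip[])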
@@ -14,13 +33,15 @@
#include "../dec/decode_vp8.h"
-/*
- * Define ANDROID_WEBP_RGB to enable specific optimizations for Android
- * RGBA_4444 & RGB_565 color support.
- *
- */
-
-#define ANDROID_WEBP_RGB
+#if defined(WEBP_EXPERIMENTAL_FEATURES)
+// Do NOT activate this feature for real compression. This is only experimental!
+// This flag is for comparison purpose against JPEG's "YUVj" natural colorspace.
+// This colorspace is close to Rec.601's Y'CbCr model with the notable
+// difference of allowing larger range for luma/chroma.
+// See http://en.wikipedia.org/wiki/YCbCr#JPEG_conversion paragraph, and its
+// difference with http://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
+// #define USE_YUVj
+#endif
//------------------------------------------------------------------------------
// YUV -> RGB conversion
@@ -53,16 +74,16 @@ static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
-#ifdef ANDROID_WEBP_RGB
- rgb[1] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
- (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
- rgb[0] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
- (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
+ const uint8_t rg = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
+ (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
+ const uint8_t gb = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
+ (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
+#ifdef WEBP_SWAP_16BIT_CSP
+ rgb[0] = gb;
+ rgb[1] = rg;
#else
- rgb[0] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
- (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
- rgb[1] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
- (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
+ rgb[0] = rg;
+ rgb[1] = gb;
#endif
}
@@ -77,14 +98,15 @@ static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
-#ifdef ANDROID_WEBP_RGB
- argb[1] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
- VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
- argb[0] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4);
+ const uint8_t rg = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
+ VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
+ const uint8_t ba = (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4) | 0x0f;
+#ifdef WEBP_SWAP_16BIT_CSP
+ argb[0] = ba;
+ argb[1] = rg;
#else
- argb[0] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
- VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
- argb[1] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4);
+ argb[0] = rg;
+ argb[1] = ba;
#endif
}
@@ -115,18 +137,14 @@ void VP8YUVInit(void);
//------------------------------------------------------------------------------
// RGB -> YUV conversion
-// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
-// More information at: http://en.wikipedia.org/wiki/YCbCr
-// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
-// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
-// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
-// We use 16bit fixed point operations.
static WEBP_INLINE int VP8ClipUV(int v) {
- v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
- return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
+ v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
+ return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
}
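The magic constant rounds and re-biases in one step: 257 << (YUV_FIX + 2 - 1)
equals 128.5 * 2^(YUV_FIX + 2), so with YUV_FIX == 16 this is
round-to-nearest(v / 2^18) + 128 followed by a clamp. An equivalent
spelled-out form (sketch, annotation only):

    static WEBP_INLINE int ClipUVRef(int v) {    // reference, not the patch
      v = ((v + (1 << 17)) >> 18) + 128;         // round to nearest, then bias
      return (v < 0) ? 0 : (v > 255) ? 255 : v;  // same clamp as above
    }

(The extra +2 in the shift suggests the r/g/b arguments arrive as accumulated
sums -- e.g. over 2x2 blocks -- rather than single samples.)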
+#ifndef USE_YUVj
+
static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
const int luma = 16839 * r + 33059 * g + 6420 * b;
@@ -134,13 +152,38 @@ static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
}
static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
- return VP8ClipUV(-9719 * r - 19081 * g + 28800 * b);
+ const int u = -9719 * r - 19081 * g + 28800 * b;
+ return VP8ClipUV(u);
+}
+
+static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
+ const int v = +28800 * r - 24116 * g - 4684 * b;
+ return VP8ClipUV(v);
+}
+
+#else
+
+// This is the JPEG-YUV colorspace, for comparison only!
+// These are also 16-bit precision coefficients from Rec.601, but with full
+// [0..255] output range.
+static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
+ const int kRound = (1 << (YUV_FIX - 1));
+ const int luma = 19595 * r + 38470 * g + 7471 * b;
+ return (luma + kRound) >> YUV_FIX; // no need to clip
+}
+
+static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
+ const int u = -11058 * r - 21710 * g + 32768 * b;
+ return VP8ClipUV(u);
}
static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
- return VP8ClipUV(+28800 * r - 24116 * g - 4684 * b);
+ const int v = 32768 * r - 27439 * g - 5329 * b;
+ return VP8ClipUV(v);
}
+#endif // USE_YUVj
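These full-range constants are the familiar JPEG/Rec.601 weights in the same
1/65536 fixed point (annotation):

    // 19595 / 65536 = 0.299   38470 / 65536 = 0.587   7471 / 65536 = 0.114
    // 32768 / 65536 = 0.500   (the B weight in U and the R weight in V)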
+
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif