Diffstat (limited to 'src')
-rw-r--r--  src/dec/Android.mk             |    9
-rw-r--r--  src/dec/frame.c                |  101
-rw-r--r--  src/dec/idec.c                 |   41
-rw-r--r--  src/dec/io.c                   |   74
-rw-r--r--  src/dec/vp8.c                  |  109
-rw-r--r--  src/dec/vp8i.h                 |   12
-rw-r--r--  src/dec/vp8l.c                 |    8
-rw-r--r--  src/dec/webp.c                 |    6
-rw-r--r--  src/demux/demux.c              |  907
-rw-r--r--  src/dsp/cpu-features.c         |  396
-rw-r--r--  src/dsp/cpu-features.h         |   56
-rw-r--r--  src/dsp/cpu.c                  |   28
-rw-r--r--  src/dsp/dec_neon.c             |   88
-rw-r--r--  src/dsp/dec_sse2.c             |   37
-rw-r--r--  src/dsp/dsp.h                  |   23
-rw-r--r--  src/dsp/enc.c                  |  117
-rw-r--r--  src/dsp/enc_neon.c             |  661
-rw-r--r--  src/dsp/enc_sse2.c             |  321
-rw-r--r--  src/dsp/lossless.c             |   27
-rw-r--r--  src/dsp/upsampling.c           |   10
-rw-r--r--  src/dsp/upsampling_neon.c      |  292
-rw-r--r--  src/dsp/upsampling_sse2.c      |   38
-rw-r--r--  src/dsp/yuv.c                  |   15
-rw-r--r--  src/dsp/yuv.h                  |  109
-rw-r--r--  src/enc/Android.mk             |   10
-rw-r--r--  src/enc/alpha.c                |   13
-rw-r--r--  src/enc/analysis.c             |  214
-rw-r--r--  src/enc/backward_references.c  |  174
-rw-r--r--  src/enc/backward_references.h  |    6
-rw-r--r--  src/enc/frame.c                |  252
-rw-r--r--  src/enc/histogram.c            |   34
-rw-r--r--  src/enc/picture.c              |  153
-rw-r--r--  src/enc/quant.c                |   48
-rw-r--r--  src/enc/syntax.c               |   17
-rw-r--r--  src/enc/vp8enci.h              |   65
-rw-r--r--  src/enc/vp8l.c                 |  121
-rw-r--r--  src/utils/bit_reader.c         |    4
-rw-r--r--  src/utils/bit_reader.h         |   38
-rw-r--r--  src/utils/huffman_encode.c     |   15
-rw-r--r--  src/utils/utils.c              |    9
-rw-r--r--  src/utils/utils.h              |   39
41 files changed, 3720 insertions(+), 977 deletions(-)
diff --git a/src/dec/Android.mk b/src/dec/Android.mk
index ab795aee..e3148dc7 100644
--- a/src/dec/Android.mk
+++ b/src/dec/Android.mk
@@ -28,27 +28,26 @@ LOCAL_SRC_FILES := \
vp8l.c \
webp.c \
../dsp/cpu.c \
+ ../dsp/cpu-features.c \
../dsp/dec.c \
../dsp/dec_neon.c \
../dsp/dec_sse2.c \
- ../dsp/enc.c \
- ../dsp/enc_sse2.c \
../dsp/lossless.c \
../dsp/upsampling.c \
+ ../dsp/upsampling_neon.c \
../dsp/upsampling_sse2.c \
../dsp/yuv.c \
+ ../demux/demux.c \
../utils/bit_reader.c \
- ../utils/bit_writer.c \
../utils/color_cache.c \
../utils/filters.c \
../utils/huffman.c \
- ../utils/huffman_encode.c \
../utils/quant_levels.c \
../utils/rescaler.c \
../utils/thread.c \
../utils/utils.c
-LOCAL_CFLAGS := -DANDROID
+LOCAL_CFLAGS := -DANDROID -DWEBP_SWAP_16BIT_CSP
LOCAL_C_INCLUDES += \
$(LOCAL_PATH) \
diff --git a/src/dec/frame.c b/src/dec/frame.c
index 9c91a48e..911c7ffc 100644
--- a/src/dec/frame.c
+++ b/src/dec/frame.c
@@ -97,54 +97,51 @@ static void FilterRow(const VP8Decoder* const dec) {
}
//------------------------------------------------------------------------------
+// Precompute the filtering strength for each segment and each i4x4/i16x16 mode.
-void VP8StoreBlock(VP8Decoder* const dec) {
+static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
if (dec->filter_type_ > 0) {
- VP8FInfo* const info = dec->f_info_ + dec->mb_x_;
- const int skip = dec->mb_info_[dec->mb_x_].skip_;
- int level = dec->filter_levels_[dec->segment_];
- if (dec->filter_hdr_.use_lf_delta_) {
- // TODO(skal): only CURRENT is handled for now.
- level += dec->filter_hdr_.ref_lf_delta_[0];
- if (dec->is_i4x4_) {
- level += dec->filter_hdr_.mode_lf_delta_[0];
- }
- }
- level = (level < 0) ? 0 : (level > 63) ? 63 : level;
- info->f_level_ = level;
-
- if (dec->filter_hdr_.sharpness_ > 0) {
- if (dec->filter_hdr_.sharpness_ > 4) {
- level >>= 2;
+ int s;
+ const VP8FilterHeader* const hdr = &dec->filter_hdr_;
+ for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
+ int i4x4;
+ // First, compute the initial level
+ int base_level;
+ if (dec->segment_hdr_.use_segment_) {
+ base_level = dec->segment_hdr_.filter_strength_[s];
+ if (!dec->segment_hdr_.absolute_delta_) {
+ base_level += hdr->level_;
+ }
} else {
- level >>= 1;
+ base_level = hdr->level_;
}
- if (level > 9 - dec->filter_hdr_.sharpness_) {
- level = 9 - dec->filter_hdr_.sharpness_;
+ for (i4x4 = 0; i4x4 <= 1; ++i4x4) {
+ VP8FInfo* const info = &dec->fstrengths_[s][i4x4];
+ int level = base_level;
+ if (hdr->use_lf_delta_) {
+ // TODO(skal): only CURRENT is handled for now.
+ level += hdr->ref_lf_delta_[0];
+ if (i4x4) {
+ level += hdr->mode_lf_delta_[0];
+ }
+ }
+ level = (level < 0) ? 0 : (level > 63) ? 63 : level;
+ info->f_level_ = level;
+
+ if (hdr->sharpness_ > 0) {
+ if (hdr->sharpness_ > 4) {
+ level >>= 2;
+ } else {
+ level >>= 1;
+ }
+ if (level > 9 - hdr->sharpness_) {
+ level = 9 - hdr->sharpness_;
+ }
+ }
+ info->f_ilevel_ = (level < 1) ? 1 : level;
+ info->f_inner_ = 0;
}
}
-
- info->f_ilevel_ = (level < 1) ? 1 : level;
- info->f_inner_ = (!skip || dec->is_i4x4_);
- }
- {
- // Transfer samples to row cache
- int y;
- const int y_offset = dec->cache_id_ * 16 * dec->cache_y_stride_;
- const int uv_offset = dec->cache_id_ * 8 * dec->cache_uv_stride_;
- uint8_t* const ydst = dec->cache_y_ + dec->mb_x_ * 16 + y_offset;
- uint8_t* const udst = dec->cache_u_ + dec->mb_x_ * 8 + uv_offset;
- uint8_t* const vdst = dec->cache_v_ + dec->mb_x_ * 8 + uv_offset;
- for (y = 0; y < 16; ++y) {
- memcpy(ydst + y * dec->cache_y_stride_,
- dec->yuv_b_ + Y_OFF + y * BPS, 16);
- }
- for (y = 0; y < 8; ++y) {
- memcpy(udst + y * dec->cache_uv_stride_,
- dec->yuv_b_ + U_OFF + y * BPS, 8);
- memcpy(vdst + y * dec->cache_uv_stride_,
- dec->yuv_b_ + V_OFF + y * BPS, 8);
- }
}
}
@@ -339,6 +336,7 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
dec->br_mb_y_ = dec->mb_h_;
}
}
+ PrecomputeFilterStrengths(dec);
return VP8_STATUS_OK;
}
@@ -496,6 +494,7 @@ static int AllocateMemory(VP8Decoder* const dec) {
// alpha plane
dec->alpha_plane_ = alpha_size ? (uint8_t*)mem : NULL;
mem += alpha_size;
+ assert(mem <= (uint8_t*)dec->mem_ + dec->mem_size_);
// note: left-info is initialized once for all.
memset(dec->mb_info_ - 1, 0, mb_info_size);
@@ -551,6 +550,7 @@ static WEBP_INLINE void Copy32b(uint8_t* dst, uint8_t* src) {
}
void VP8ReconstructBlock(VP8Decoder* const dec) {
+ int j;
uint8_t* const y_dst = dec->yuv_b_ + Y_OFF;
uint8_t* const u_dst = dec->yuv_b_ + U_OFF;
uint8_t* const v_dst = dec->yuv_b_ + V_OFF;
@@ -558,7 +558,6 @@ void VP8ReconstructBlock(VP8Decoder* const dec) {
// Rotate in the left samples from previously decoded block. We move four
// pixels at a time for alignment reason, and because of in-loop filter.
if (dec->mb_x_ > 0) {
- int j;
for (j = -1; j < 16; ++j) {
Copy32b(&y_dst[j * BPS - 4], &y_dst[j * BPS + 12]);
}
@@ -567,7 +566,6 @@ void VP8ReconstructBlock(VP8Decoder* const dec) {
Copy32b(&v_dst[j * BPS - 4], &v_dst[j * BPS + 4]);
}
} else {
- int j;
for (j = 0; j < 16; ++j) {
y_dst[j * BPS - 1] = 129;
}
@@ -670,6 +668,21 @@ void VP8ReconstructBlock(VP8Decoder* const dec) {
}
}
}
+ // Transfer reconstructed samples from yuv_b_ cache to final destination.
+ {
+ const int y_offset = dec->cache_id_ * 16 * dec->cache_y_stride_;
+ const int uv_offset = dec->cache_id_ * 8 * dec->cache_uv_stride_;
+ uint8_t* const y_out = dec->cache_y_ + dec->mb_x_ * 16 + y_offset;
+ uint8_t* const u_out = dec->cache_u_ + dec->mb_x_ * 8 + uv_offset;
+ uint8_t* const v_out = dec->cache_v_ + dec->mb_x_ * 8 + uv_offset;
+ for (j = 0; j < 16; ++j) {
+ memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
+ }
+ for (j = 0; j < 8; ++j) {
+ memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
+ memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
+ }
+ }
}
//------------------------------------------------------------------------------
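The filter-level math that PrecomputeFilterStrengths() hoists out of the per-macroblock path boils down to a clamp plus a sharpness adjustment. A minimal sketch of that derivation (illustrative only, mirroring the logic above; not part of the patch):

    /* Derive the edge level (f_level_) and the inner level (f_ilevel_) from a
     * segment-adjusted base level and the frame-wide sharpness setting. */
    static void DeriveFilterLevels(int level, int sharpness,
                                   int* const f_level, int* const f_ilevel) {
      level = (level < 0) ? 0 : (level > 63) ? 63 : level;  /* clamp to [0, 63] */
      *f_level = level;
      if (sharpness > 0) {
        level >>= (sharpness > 4) ? 2 : 1;
        if (level > 9 - sharpness) level = 9 - sharpness;
      }
      *f_ilevel = (level < 1) ? 1 : level;  /* inner level has a floor of 1 */
    }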
diff --git a/src/dec/idec.c b/src/dec/idec.c
index 7df790ce..17810c83 100644
--- a/src/dec/idec.c
+++ b/src/dec/idec.c
@@ -425,9 +425,8 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
}
return VP8_STATUS_SUSPENDED;
}
+ // Reconstruct and emit samples.
VP8ReconstructBlock(dec);
- // Store data and save block's filtering params
- VP8StoreBlock(dec);
// Release buffer only if there is only one partition
if (dec->num_parts_ == 1) {
@@ -596,12 +595,22 @@ void WebPIDelete(WebPIDecoder* idec) {
WebPIDecoder* WebPINewRGB(WEBP_CSP_MODE mode, uint8_t* output_buffer,
size_t output_buffer_size, int output_stride) {
+ const int is_external_memory = (output_buffer != NULL);
WebPIDecoder* idec;
+
if (mode >= MODE_YUV) return NULL;
+ if (!is_external_memory) { // Overwrite parameters to sane values.
+ output_buffer_size = 0;
+ output_stride = 0;
+ } else { // A buffer was passed. Validate the other params.
+ if (output_stride == 0 || output_buffer_size == 0) {
+ return NULL; // invalid parameter.
+ }
+ }
idec = WebPINewDecoder(NULL);
if (idec == NULL) return NULL;
idec->output_.colorspace = mode;
- idec->output_.is_external_memory = 1;
+ idec->output_.is_external_memory = is_external_memory;
idec->output_.u.RGBA.rgba = output_buffer;
idec->output_.u.RGBA.stride = output_stride;
idec->output_.u.RGBA.size = output_buffer_size;
@@ -612,10 +621,30 @@ WebPIDecoder* WebPINewYUVA(uint8_t* luma, size_t luma_size, int luma_stride,
uint8_t* u, size_t u_size, int u_stride,
uint8_t* v, size_t v_size, int v_stride,
uint8_t* a, size_t a_size, int a_stride) {
- WebPIDecoder* const idec = WebPINewDecoder(NULL);
+ const int is_external_memory = (luma != NULL);
+ WebPIDecoder* idec;
+ WEBP_CSP_MODE colorspace;
+
+ if (!is_external_memory) { // Overwrite parameters to sane values.
+ luma_size = u_size = v_size = a_size = 0;
+ luma_stride = u_stride = v_stride = a_stride = 0;
+ u = v = a = NULL;
+ colorspace = MODE_YUVA;
+ } else { // A luma buffer was passed. Validate the other parameters.
+ if (u == NULL || v == NULL) return NULL;
+ if (luma_size == 0 || u_size == 0 || v_size == 0) return NULL;
+ if (luma_stride == 0 || u_stride == 0 || v_stride == 0) return NULL;
+ if (a != NULL) {
+ if (a_size == 0 || a_stride == 0) return NULL;
+ }
+ colorspace = (a == NULL) ? MODE_YUV : MODE_YUVA;
+ }
+
+ idec = WebPINewDecoder(NULL);
if (idec == NULL) return NULL;
- idec->output_.colorspace = (a == NULL) ? MODE_YUV : MODE_YUVA;
- idec->output_.is_external_memory = 1;
+
+ idec->output_.colorspace = colorspace;
+ idec->output_.is_external_memory = is_external_memory;
idec->output_.u.YUVA.y = luma;
idec->output_.u.YUVA.y_stride = luma_stride;
idec->output_.u.YUVA.y_size = luma_size;
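With the stricter checks above, a NULL output buffer now selects internal allocation, while an external buffer must come with a non-zero size and stride. A usage sketch (picture dimensions are hypothetical):

    enum { kWidth = 640, kHeight = 480 };  /* hypothetical picture size */
    static uint8_t rgba[kWidth * kHeight * 4];
    /* External memory: a zero size or stride now fails fast with NULL. */
    WebPIDecoder* const idec =
        WebPINewRGB(MODE_RGBA, rgba, sizeof(rgba), kWidth * 4);
    /* Internal memory: pass NULL and let the decoder allocate the output. */
    WebPIDecoder* const idec2 = WebPINewRGB(MODE_RGBA, NULL, 0, 0);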
diff --git a/src/dec/io.c b/src/dec/io.c
index c5746f74..594804c2 100644
--- a/src/dec/io.c
+++ b/src/dec/io.c
@@ -111,7 +111,7 @@ static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
const uint8_t* top_u = p->tmp_u;
const uint8_t* top_v = p->tmp_v;
int y = io->mb_y;
- int y_end = io->mb_y + io->mb_h;
+ const int y_end = io->mb_y + io->mb_h;
const int mb_w = io->mb_w;
const int uv_w = (mb_w + 1) / 2;
@@ -150,7 +150,7 @@ static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
// Process the very last row of even-sized picture
if (!(y_end & 1)) {
upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v,
- dst + buf->stride, NULL, mb_w);
+ dst + buf->stride, NULL, mb_w);
}
}
return num_lines_out;
@@ -203,7 +203,7 @@ static int GetAlphaSourceRow(const VP8Io* const io,
*alpha -= io->width;
}
if (io->crop_top + io->mb_y + io->mb_h == io->crop_bottom) {
- // If it's the very last call, we process all the remaing rows!
+ // If it's the very last call, we process all the remaining rows!
*num_rows = io->crop_bottom - io->crop_top - start_y;
}
}
@@ -214,32 +214,30 @@ static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p) {
const uint8_t* alpha = io->a;
if (alpha != NULL) {
const int mb_w = io->mb_w;
- int i, j;
const WEBP_CSP_MODE colorspace = p->output->colorspace;
const int alpha_first =
(colorspace == MODE_ARGB || colorspace == MODE_Argb);
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
int num_rows;
const int start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
+ uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
+ uint8_t* dst = base_rgba + (alpha_first ? 0 : 3);
uint32_t alpha_mask = 0xff;
+ int i, j;
- {
- uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
- uint8_t* dst = base_rgba + (alpha_first ? 0 : 3);
- for (j = 0; j < num_rows; ++j) {
- for (i = 0; i < mb_w; ++i) {
- const uint32_t alpha_value = alpha[i];
- dst[4 * i] = alpha_value;
- alpha_mask &= alpha_value;
- }
- alpha += io->width;
- dst += buf->stride;
- }
- // alpha_mask is < 0xff if there's non-trivial alpha to premultiply with.
- if (alpha_mask != 0xff && WebPIsPremultipliedMode(colorspace)) {
- WebPApplyAlphaMultiply(base_rgba, alpha_first,
- mb_w, num_rows, buf->stride);
+ for (j = 0; j < num_rows; ++j) {
+ for (i = 0; i < mb_w; ++i) {
+ const uint32_t alpha_value = alpha[i];
+ dst[4 * i] = alpha_value;
+ alpha_mask &= alpha_value;
}
+ alpha += io->width;
+ dst += buf->stride;
+ }
+ // alpha_mask is < 0xff if there's non-trivial alpha to premultiply with.
+ if (alpha_mask != 0xff && WebPIsPremultipliedMode(colorspace)) {
+ WebPApplyAlphaMultiply(base_rgba, alpha_first,
+ mb_w, num_rows, buf->stride);
}
}
return 0;
@@ -249,28 +247,27 @@ static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p) {
const uint8_t* alpha = io->a;
if (alpha != NULL) {
const int mb_w = io->mb_w;
- int i, j;
+ const WEBP_CSP_MODE colorspace = p->output->colorspace;
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
int num_rows;
const int start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
+ uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
+ uint8_t* alpha_dst = base_rgba + 1;
uint32_t alpha_mask = 0x0f;
+ int i, j;
- {
- uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
- uint8_t* alpha_dst = base_rgba + 1;
- for (j = 0; j < num_rows; ++j) {
- for (i = 0; i < mb_w; ++i) {
- // Fill in the alpha value (converted to 4 bits).
- const uint32_t alpha_value = alpha[i] >> 4;
- alpha_dst[2 * i] = (alpha_dst[2 * i] & 0xf0) | alpha_value;
- alpha_mask &= alpha_value;
- }
- alpha += io->width;
- alpha_dst += buf->stride;
- }
- if (alpha_mask != 0x0f && p->output->colorspace == MODE_rgbA_4444) {
- WebPApplyAlphaMultiply4444(base_rgba, mb_w, num_rows, buf->stride);
+ for (j = 0; j < num_rows; ++j) {
+ for (i = 0; i < mb_w; ++i) {
+ // Fill in the alpha value (converted to 4 bits).
+ const uint32_t alpha_value = alpha[i] >> 4;
+ alpha_dst[2 * i] = (alpha_dst[2 * i] & 0xf0) | alpha_value;
+ alpha_mask &= alpha_value;
}
+ alpha += io->width;
+ alpha_dst += buf->stride;
+ }
+ if (alpha_mask != 0x0f && WebPIsPremultipliedMode(colorspace)) {
+ WebPApplyAlphaMultiply4444(base_rgba, mb_w, num_rows, buf->stride);
}
}
return 0;
@@ -497,8 +494,7 @@ static int InitRGBRescaler(const VP8Io* const io, WebPDecParams* const p) {
tmp_size1 += work_size;
tmp_size2 += out_width;
}
- p->memory =
- calloc(1, tmp_size1 * sizeof(*work) + tmp_size2 * sizeof(*tmp));
+ p->memory = calloc(1, tmp_size1 * sizeof(*work) + tmp_size2 * sizeof(*tmp));
if (p->memory == NULL) {
return 0; // memory error
}
@@ -595,7 +591,7 @@ static int CustomSetup(VP8Io* io) {
//------------------------------------------------------------------------------
static int CustomPut(const VP8Io* io) {
- WebPDecParams* p = (WebPDecParams*)io->opaque;
+ WebPDecParams* const p = (WebPDecParams*)io->opaque;
const int mb_w = io->mb_w;
const int mb_h = io->mb_h;
int num_lines_out;
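A detail worth calling out in the EmitAlpha* loops above: the running AND over alpha samples doubles as an opacity test. A self-contained sketch of the idea (not part of the patch):

    /* Returns 1 if every alpha sample is 0xff. AND-ing all samples leaves 0xff
     * only when no pixel is transparent, so premultiplication can be skipped
     * in the common fully-opaque case. */
    static int AllOpaque(const uint8_t* alpha, int width, int height,
                         int stride) {
      uint32_t mask = 0xff;
      int i, j;
      for (j = 0; j < height; ++j) {
        for (i = 0; i < width; ++i) mask &= alpha[i];
        alpha += stride;
      }
      return (mask == 0xff);
    }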
diff --git a/src/dec/vp8.c b/src/dec/vp8.c
index b0ccfa2a..253cb6b6 100644
--- a/src/dec/vp8.c
+++ b/src/dec/vp8.c
@@ -236,20 +236,6 @@ static int ParseFilterHeader(VP8BitReader* br, VP8Decoder* const dec) {
}
}
dec->filter_type_ = (hdr->level_ == 0) ? 0 : hdr->simple_ ? 1 : 2;
- if (dec->filter_type_ > 0) { // precompute filter levels per segment
- if (dec->segment_hdr_.use_segment_) {
- int s;
- for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
- int strength = dec->segment_hdr_.filter_strength_[s];
- if (!dec->segment_hdr_.absolute_delta_) {
- strength += hdr->level_;
- }
- dec->filter_levels_[s] = strength;
- }
- } else {
- dec->filter_levels_[0] = hdr->level_;
- }
- }
return !br->eof_;
}
@@ -458,7 +444,7 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
//------------------------------------------------------------------------------
// Residual decoding (Paragraph 13.2 / 13.3)
-static const uint8_t kBands[16 + 1] = {
+static const int kBands[16 + 1] = {
0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7,
0 // extra entry as sentinel
};
@@ -474,6 +460,39 @@ static const uint8_t kZigzag[16] = {
};
typedef const uint8_t (*ProbaArray)[NUM_CTX][NUM_PROBAS]; // for const-casting
+typedef const uint8_t (*ProbaCtxArray)[NUM_PROBAS];
+
+// See section 13.2: http://tools.ietf.org/html/rfc6386#section-13.2
+static int GetLargeValue(VP8BitReader* const br, const uint8_t* const p) {
+ int v;
+ if (!VP8GetBit(br, p[3])) {
+ if (!VP8GetBit(br, p[4])) {
+ v = 2;
+ } else {
+ v = 3 + VP8GetBit(br, p[5]);
+ }
+ } else {
+ if (!VP8GetBit(br, p[6])) {
+ if (!VP8GetBit(br, p[7])) {
+ v = 5 + VP8GetBit(br, 159);
+ } else {
+ v = 7 + 2 * VP8GetBit(br, 165);
+ v += VP8GetBit(br, 145);
+ }
+ } else {
+ const uint8_t* tab;
+ const int bit1 = VP8GetBit(br, p[8]);
+ const int bit0 = VP8GetBit(br, p[9 + bit1]);
+ const int cat = 2 * bit1 + bit0;
+ v = 0;
+ for (tab = kCat3456[cat]; *tab; ++tab) {
+ v += v + VP8GetBit(br, *tab);
+ }
+ v += 3 + (8 << cat);
+ }
+ }
+ return v;
+}
// Returns the position of the last non-zero coeff plus one
// (and 0 if there's no coeff at all)
@@ -484,54 +503,26 @@ static int GetCoeffs(VP8BitReader* const br, ProbaArray prob,
if (!VP8GetBit(br, p[0])) { // first EOB is more a 'CBP' bit.
return 0;
}
- while (1) {
- ++n;
+ for (; n < 16; ++n) {
+ const ProbaCtxArray p_ctx = prob[kBands[n + 1]];
if (!VP8GetBit(br, p[1])) {
- p = prob[kBands[n]][0];
+ p = p_ctx[0];
} else { // non zero coeff
- int v, j;
+ int v;
if (!VP8GetBit(br, p[2])) {
- p = prob[kBands[n]][1];
v = 1;
+ p = p_ctx[1];
} else {
- if (!VP8GetBit(br, p[3])) {
- if (!VP8GetBit(br, p[4])) {
- v = 2;
- } else {
- v = 3 + VP8GetBit(br, p[5]);
- }
- } else {
- if (!VP8GetBit(br, p[6])) {
- if (!VP8GetBit(br, p[7])) {
- v = 5 + VP8GetBit(br, 159);
- } else {
- v = 7 + 2 * VP8GetBit(br, 165);
- v += VP8GetBit(br, 145);
- }
- } else {
- const uint8_t* tab;
- const int bit1 = VP8GetBit(br, p[8]);
- const int bit0 = VP8GetBit(br, p[9 + bit1]);
- const int cat = 2 * bit1 + bit0;
- v = 0;
- for (tab = kCat3456[cat]; *tab; ++tab) {
- v += v + VP8GetBit(br, *tab);
- }
- v += 3 + (8 << cat);
- }
- }
- p = prob[kBands[n]][2];
+ v = GetLargeValue(br, p);
+ p = p_ctx[2];
}
- j = kZigzag[n - 1];
- out[j] = VP8GetSigned(br, v) * dq[j > 0];
- if (n == 16 || !VP8GetBit(br, p[0])) { // EOB
- return n;
+ out[kZigzag[n]] = VP8GetSigned(br, v) * dq[n > 0];
+ if (n < 15 && !VP8GetBit(br, p[0])) { // EOB
+ return n + 1;
}
}
- if (n == 16) {
- return 16;
- }
}
+ return 16;
}
// Alias-safe way of converting 4bytes to 32bits.
@@ -670,6 +661,12 @@ int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br) {
dec->non_zero_ac_ = 0;
}
+ if (dec->filter_type_ > 0) { // store filter info
+ VP8FInfo* const finfo = dec->f_info_ + dec->mb_x_;
+ *finfo = dec->fstrengths_[dec->segment_][dec->is_i4x4_];
+ finfo->f_inner_ = (!info->skip_ || dec->is_i4x4_);
+ }
+
return (!token_br->eof_);
}
@@ -693,10 +690,8 @@ static int ParseFrame(VP8Decoder* const dec, VP8Io* io) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Premature end-of-file encountered.");
}
+ // Reconstruct and emit samples.
VP8ReconstructBlock(dec);
-
- // Store data and save block's filtering params
- VP8StoreBlock(dec);
}
if (!VP8ProcessRow(dec, io)) {
return VP8SetError(dec, VP8_STATUS_USER_ABORT, "Output aborted.");
diff --git a/src/dec/vp8i.h b/src/dec/vp8i.h
index 4382edfd..4f5192e2 100644
--- a/src/dec/vp8i.h
+++ b/src/dec/vp8i.h
@@ -28,7 +28,7 @@ extern "C" {
// version numbers
#define DEC_MAJ_VERSION 0
#define DEC_MIN_VERSION 2
-#define DEC_REV_VERSION 0
+#define DEC_REV_VERSION 1
#define ONLY_KEYFRAME_CODE // to remove any code related to P-Frames
@@ -157,7 +157,7 @@ typedef struct { // filter specs
} VP8FInfo;
typedef struct { // used for syntax-parsing
- unsigned int nz_; // non-zero AC/DC coeffs
+ unsigned int nz_:24; // non-zero AC/DC coeffs (24bit)
unsigned int dc_nz_:1; // non-zero DC coeffs
unsigned int skip_:1; // block type
} VP8MB;
@@ -269,9 +269,9 @@ struct VP8Decoder {
uint32_t non_zero_ac_;
// Filtering side-info
- int filter_type_; // 0=off, 1=simple, 2=complex
- int filter_row_; // per-row flag
- uint8_t filter_levels_[NUM_MB_SEGMENTS]; // precalculated per-segment
+ int filter_type_; // 0=off, 1=simple, 2=complex
+ int filter_row_; // per-row flag
+ VP8FInfo fstrengths_[NUM_MB_SEGMENTS][2]; // precalculated per-segment/type
// extensions
const uint8_t* alpha_data_; // compressed alpha data (if present)
@@ -312,8 +312,6 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io);
int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io);
// Process the last decoded row (filtering + output)
int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io);
-// Store a block, along with filtering params
-void VP8StoreBlock(VP8Decoder* const dec);
// To be called at the start of a new scanline, to initialize predictors.
void VP8InitScanline(VP8Decoder* const dec);
// Decode one macroblock. Returns false if there is not enough data.
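A side effect of narrowing nz_ to 24 bits above is that the three VP8MB bit-fields now pack into one 32-bit word per macroblock. A hypothetical compile-time check of that assumption (not in the patch):

    /* Fails to compile if the bit-fields no longer fit a single word. */
    typedef char Vp8MbFitsOneWord[(sizeof(VP8MB) <= sizeof(uint32_t)) ? 1 : -1];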
diff --git a/src/dec/vp8l.c b/src/dec/vp8l.c
index 897e4395..a1c8d3a9 100644
--- a/src/dec/vp8l.c
+++ b/src/dec/vp8l.c
@@ -327,10 +327,10 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
hdr->huffman_subsample_bits_ = huffman_precision;
for (i = 0; i < huffman_pixs; ++i) {
// The huffman data is stored in red and green bytes.
- const int index = (huffman_image[i] >> 8) & 0xffff;
- huffman_image[i] = index;
- if (index >= num_htree_groups) {
- num_htree_groups = index + 1;
+ const int group = (huffman_image[i] >> 8) & 0xffff;
+ huffman_image[i] = group;
+ if (group >= num_htree_groups) {
+ num_htree_groups = group + 1;
}
}
}
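The 'index' to 'group' rename above reflects what the value is: a meta-huffman group id packed into the red and green channels of the huffman-image pixel. An illustrative extraction helper (not part of the patch):

    /* Bits 8..23 of the ARGB pixel (the green and red bytes) hold the id. */
    static int HuffmanGroupId(uint32_t argb_pixel) {
      return (int)((argb_pixel >> 8) & 0xffff);
    }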
diff --git a/src/dec/webp.c b/src/dec/webp.c
index 1edf6d97..962b3e2f 100644
--- a/src/dec/webp.c
+++ b/src/dec/webp.c
@@ -40,8 +40,8 @@ extern "C" {
// 20..23 VP8X flags bit-map corresponding to the chunk-types present.
// 24..26 Width of the Canvas Image.
// 27..29 Height of the Canvas Image.
-// There can be extra chunks after the "VP8X" chunk (ICCP, TILE, FRM, VP8,
-// META ...)
+// There can be extra chunks after the "VP8X" chunk (ICCP, FRGM, ANMF, VP8,
+// VP8L, XMP, EXIF ...)
// All sizes are in little-endian order.
// Note: chunk data size must be padded to multiple of 2 when written.
@@ -308,7 +308,7 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
// necessary to send VP8X chunk to the decoder.
return VP8_STATUS_BITSTREAM_ERROR;
}
- if (has_alpha != NULL) *has_alpha = !!(flags & ALPHA_FLAG_BIT);
+ if (has_alpha != NULL) *has_alpha = !!(flags & ALPHA_FLAG);
if (found_vp8x && headers == NULL) {
return VP8_STATUS_OK; // Return features from VP8X header.
}
diff --git a/src/demux/demux.c b/src/demux/demux.c
new file mode 100644
index 00000000..690749ad
--- /dev/null
+++ b/src/demux/demux.c
@@ -0,0 +1,907 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// WebP container demux.
+//
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../utils/utils.h"
+#include "webp/decode.h" // WebPGetInfo
+#include "webp/demux.h"
+#include "webp/format_constants.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+typedef struct {
+ size_t start_; // start location of the data
+ size_t end_; // end location
+ size_t riff_end_; // riff chunk end location, can be > end_.
+ size_t buf_size_; // size of the buffer
+ const uint8_t* buf_;
+} MemBuffer;
+
+typedef struct {
+ size_t offset_;
+ size_t size_;
+} ChunkData;
+
+typedef struct Frame {
+ int x_offset_, y_offset_;
+ int width_, height_;
+ int duration_;
+ WebPMuxAnimDispose dispose_method_;
+ int is_fragment_; // this is a frame fragment (and not a full frame).
+ int frame_num_; // the referent frame number for use in assembling fragments.
+ int complete_; // img_components_ contains a full image.
+ ChunkData img_components_[2]; // 0=VP8{,L} 1=ALPH
+ struct Frame* next_;
+} Frame;
+
+typedef struct Chunk {
+ ChunkData data_;
+ struct Chunk* next_;
+} Chunk;
+
+struct WebPDemuxer {
+ MemBuffer mem_;
+ WebPDemuxState state_;
+ int is_ext_format_;
+ uint32_t feature_flags_;
+ int canvas_width_, canvas_height_;
+ int loop_count_;
+ uint32_t bgcolor_;
+ int num_frames_;
+ Frame* frames_;
+ Chunk* chunks_; // non-image chunks
+};
+
+typedef enum {
+ PARSE_OK,
+ PARSE_NEED_MORE_DATA,
+ PARSE_ERROR
+} ParseStatus;
+
+typedef struct ChunkParser {
+ uint8_t id[4];
+ ParseStatus (*parse)(WebPDemuxer* const dmux);
+ int (*valid)(const WebPDemuxer* const dmux);
+} ChunkParser;
+
+static ParseStatus ParseSingleImage(WebPDemuxer* const dmux);
+static ParseStatus ParseVP8X(WebPDemuxer* const dmux);
+static int IsValidSimpleFormat(const WebPDemuxer* const dmux);
+static int IsValidExtendedFormat(const WebPDemuxer* const dmux);
+
+static const ChunkParser kMasterChunks[] = {
+ { { 'V', 'P', '8', ' ' }, ParseSingleImage, IsValidSimpleFormat },
+ { { 'V', 'P', '8', 'L' }, ParseSingleImage, IsValidSimpleFormat },
+ { { 'V', 'P', '8', 'X' }, ParseVP8X, IsValidExtendedFormat },
+ { { '0', '0', '0', '0' }, NULL, NULL },
+};
+
+// -----------------------------------------------------------------------------
+// MemBuffer
+
+static int RemapMemBuffer(MemBuffer* const mem,
+ const uint8_t* data, size_t size) {
+ if (size < mem->buf_size_) return 0; // can't remap to a shorter buffer!
+
+ mem->buf_ = data;
+ mem->end_ = mem->buf_size_ = size;
+ return 1;
+}
+
+static int InitMemBuffer(MemBuffer* const mem,
+ const uint8_t* data, size_t size) {
+ memset(mem, 0, sizeof(*mem));
+ return RemapMemBuffer(mem, data, size);
+}
+
+// Return the remaining data size available in 'mem'.
+static WEBP_INLINE size_t MemDataSize(const MemBuffer* const mem) {
+ return (mem->end_ - mem->start_);
+}
+
+// Return true if 'size' exceeds the end of the RIFF chunk.
+static WEBP_INLINE int SizeIsInvalid(const MemBuffer* const mem, size_t size) {
+ return (size > mem->riff_end_ - mem->start_);
+}
+
+static WEBP_INLINE void Skip(MemBuffer* const mem, size_t size) {
+ mem->start_ += size;
+}
+
+static WEBP_INLINE void Rewind(MemBuffer* const mem, size_t size) {
+ mem->start_ -= size;
+}
+
+static WEBP_INLINE const uint8_t* GetBuffer(MemBuffer* const mem) {
+ return mem->buf_ + mem->start_;
+}
+
+// Read from 'mem' and skip the read bytes.
+static WEBP_INLINE uint8_t ReadByte(MemBuffer* const mem) {
+ const uint8_t byte = mem->buf_[mem->start_];
+ Skip(mem, 1);
+ return byte;
+}
+
+static WEBP_INLINE int ReadLE16s(MemBuffer* const mem) {
+ const uint8_t* const data = mem->buf_ + mem->start_;
+ const int val = GetLE16(data);
+ Skip(mem, 2);
+ return val;
+}
+
+static WEBP_INLINE int ReadLE24s(MemBuffer* const mem) {
+ const uint8_t* const data = mem->buf_ + mem->start_;
+ const int val = GetLE24(data);
+ Skip(mem, 3);
+ return val;
+}
+
+static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) {
+ const uint8_t* const data = mem->buf_ + mem->start_;
+ const uint32_t val = GetLE32(data);
+ Skip(mem, 4);
+ return val;
+}
+
+// -----------------------------------------------------------------------------
+// Secondary chunk parsing
+
+static void AddChunk(WebPDemuxer* const dmux, Chunk* const chunk) {
+ Chunk** c = &dmux->chunks_;
+ while (*c != NULL) c = &(*c)->next_;
+ *c = chunk;
+ chunk->next_ = NULL;
+}
+
+// Add a frame to the end of the list, ensuring the last frame is complete.
+// Returns true on success, false otherwise.
+static int AddFrame(WebPDemuxer* const dmux, Frame* const frame) {
+ const Frame* last_frame = NULL;
+ Frame** f = &dmux->frames_;
+ while (*f != NULL) {
+ last_frame = *f;
+ f = &(*f)->next_;
+ }
+ if (last_frame != NULL && !last_frame->complete_) return 0;
+ *f = frame;
+ frame->next_ = NULL;
+ return 1;
+}
+
+// Store image bearing chunks to 'frame'.
+static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
+ MemBuffer* const mem, Frame* const frame) {
+ int alpha_chunks = 0;
+ int image_chunks = 0;
+ int done = (MemDataSize(mem) < min_size);
+ ParseStatus status = PARSE_OK;
+
+ if (done) return PARSE_NEED_MORE_DATA;
+
+ do {
+ const size_t chunk_start_offset = mem->start_;
+ const uint32_t fourcc = ReadLE32(mem);
+ const uint32_t payload_size = ReadLE32(mem);
+ const uint32_t payload_size_padded = payload_size + (payload_size & 1);
+ const size_t payload_available = (payload_size_padded > MemDataSize(mem))
+ ? MemDataSize(mem) : payload_size_padded;
+ const size_t chunk_size = CHUNK_HEADER_SIZE + payload_available;
+
+ if (payload_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
+ if (SizeIsInvalid(mem, payload_size_padded)) return PARSE_ERROR;
+ if (payload_size_padded > MemDataSize(mem)) status = PARSE_NEED_MORE_DATA;
+
+ switch (fourcc) {
+ case MKFOURCC('A', 'L', 'P', 'H'):
+ if (alpha_chunks == 0) {
+ ++alpha_chunks;
+ frame->img_components_[1].offset_ = chunk_start_offset;
+ frame->img_components_[1].size_ = chunk_size;
+ frame->frame_num_ = frame_num;
+ Skip(mem, payload_available);
+ } else {
+ goto Done;
+ }
+ break;
+ case MKFOURCC('V', 'P', '8', ' '):
+ case MKFOURCC('V', 'P', '8', 'L'):
+ if (image_chunks == 0) {
+ int width = 0, height = 0;
+ ++image_chunks;
+ frame->img_components_[0].offset_ = chunk_start_offset;
+ frame->img_components_[0].size_ = chunk_size;
+ // Extract the width and height from the bitstream, tolerating
+ // failures when the data is incomplete.
+ if (!WebPGetInfo(mem->buf_ + frame->img_components_[0].offset_,
+ frame->img_components_[0].size_, &width, &height) &&
+ status != PARSE_NEED_MORE_DATA) {
+ return PARSE_ERROR;
+ }
+
+ frame->width_ = width;
+ frame->height_ = height;
+ frame->frame_num_ = frame_num;
+ frame->complete_ = (status == PARSE_OK);
+ Skip(mem, payload_available);
+ } else {
+ goto Done;
+ }
+ break;
+ Done:
+ default:
+ // Restore fourcc/size when moving up one level in parsing.
+ Rewind(mem, CHUNK_HEADER_SIZE);
+ done = 1;
+ break;
+ }
+
+ if (mem->start_ == mem->riff_end_) {
+ done = 1;
+ } else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) {
+ status = PARSE_NEED_MORE_DATA;
+ }
+ } while (!done && status == PARSE_OK);
+
+ return status;
+}
+
+// Creates a new Frame if 'actual_size' is within bounds and 'mem' contains
+// enough data ('min_size') to parse the payload.
+// Returns PARSE_OK on success with *frame pointing to the new Frame.
+// Returns PARSE_NEED_MORE_DATA with insufficient data, PARSE_ERROR otherwise.
+static ParseStatus NewFrame(const MemBuffer* const mem,
+ uint32_t min_size, uint32_t actual_size,
+ Frame** frame) {
+ if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR;
+ if (actual_size < min_size) return PARSE_ERROR;
+ if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA;
+
+ *frame = (Frame*)calloc(1, sizeof(**frame));
+ return (*frame == NULL) ? PARSE_ERROR : PARSE_OK;
+}
+
+// Parse an 'ANMF' chunk and any image bearing chunks that immediately follow.
+// 'frame_chunk_size' is the previously validated, padded chunk size.
+static ParseStatus ParseFrame(
+ WebPDemuxer* const dmux, uint32_t frame_chunk_size) {
+ const int has_frames = !!(dmux->feature_flags_ & ANIMATION_FLAG);
+ const uint32_t anmf_payload_size = frame_chunk_size - ANMF_CHUNK_SIZE;
+ int added_frame = 0;
+ MemBuffer* const mem = &dmux->mem_;
+ Frame* frame;
+ ParseStatus status =
+ NewFrame(mem, ANMF_CHUNK_SIZE, frame_chunk_size, &frame);
+ if (status != PARSE_OK) return status;
+
+ frame->x_offset_ = 2 * ReadLE24s(mem);
+ frame->y_offset_ = 2 * ReadLE24s(mem);
+ frame->width_ = 1 + ReadLE24s(mem);
+ frame->height_ = 1 + ReadLE24s(mem);
+ frame->duration_ = ReadLE24s(mem);
+ frame->dispose_method_ = (WebPMuxAnimDispose)(ReadByte(mem) & 1);
+ if (frame->width_ * (uint64_t)frame->height_ >= MAX_IMAGE_AREA) {
+ return PARSE_ERROR;
+ }
+
+ // Store a frame only if the animation flag is set and all data for this frame
+ // is available.
+ status = StoreFrame(dmux->num_frames_ + 1, anmf_payload_size, mem, frame);
+ if (status != PARSE_ERROR && has_frames && frame->frame_num_ > 0) {
+ added_frame = AddFrame(dmux, frame);
+ if (added_frame) {
+ ++dmux->num_frames_;
+ } else {
+ status = PARSE_ERROR;
+ }
+ }
+
+ if (!added_frame) free(frame);
+ return status;
+}
+
+// Parse a 'FRGM' chunk and any image bearing chunks that immediately follow.
+// 'fragment_chunk_size' is the previously validated, padded chunk size.
+static ParseStatus ParseFragment(WebPDemuxer* const dmux,
+ uint32_t fragment_chunk_size) {
+ const int has_fragments = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
+ const uint32_t frgm_payload_size = fragment_chunk_size - FRGM_CHUNK_SIZE;
+ int added_fragment = 0;
+ MemBuffer* const mem = &dmux->mem_;
+ Frame* frame;
+ ParseStatus status =
+ NewFrame(mem, FRGM_CHUNK_SIZE, fragment_chunk_size, &frame);
+ if (status != PARSE_OK) return status;
+
+ frame->is_fragment_ = 1;
+ frame->x_offset_ = 2 * ReadLE24s(mem);
+ frame->y_offset_ = 2 * ReadLE24s(mem);
+
+ // Store a fragment only if the fragments flag is set and all data for this
+ // fragment is available.
+ status = StoreFrame(dmux->num_frames_, frgm_payload_size, mem, frame);
+ if (status != PARSE_ERROR && has_fragments && frame->frame_num_ > 0) {
+ // Note num_frames_ is incremented only when all fragments have been
+ // consumed.
+ added_fragment = AddFrame(dmux, frame);
+ if (!added_fragment) status = PARSE_ERROR;
+ }
+
+ if (!added_fragment) free(frame);
+ return status;
+}
+
+// General chunk storage starting with the header at 'start_offset' allowing
+// the user to request the payload via a fourcc string. 'size' includes the
+// header and the unpadded payload size.
+// Returns true on success, false otherwise.
+static int StoreChunk(WebPDemuxer* const dmux,
+ size_t start_offset, uint32_t size) {
+ Chunk* const chunk = (Chunk*)calloc(1, sizeof(*chunk));
+ if (chunk == NULL) return 0;
+
+ chunk->data_.offset_ = start_offset;
+ chunk->data_.size_ = size;
+ AddChunk(dmux, chunk);
+ return 1;
+}
+
+// -----------------------------------------------------------------------------
+// Primary chunk parsing
+
+static int ReadHeader(MemBuffer* const mem) {
+ const size_t min_size = RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE;
+ uint32_t riff_size;
+
+ // Basic file level validation.
+ if (MemDataSize(mem) < min_size) return 0;
+ if (memcmp(GetBuffer(mem), "RIFF", CHUNK_SIZE_BYTES) ||
+ memcmp(GetBuffer(mem) + CHUNK_HEADER_SIZE, "WEBP", CHUNK_SIZE_BYTES)) {
+ return 0;
+ }
+
+ riff_size = GetLE32(GetBuffer(mem) + TAG_SIZE);
+ if (riff_size < CHUNK_HEADER_SIZE) return 0;
+ if (riff_size > MAX_CHUNK_PAYLOAD) return 0;
+
+ // There's no point in reading past the end of the RIFF chunk
+ mem->riff_end_ = riff_size + CHUNK_HEADER_SIZE;
+ if (mem->buf_size_ > mem->riff_end_) {
+ mem->buf_size_ = mem->end_ = mem->riff_end_;
+ }
+
+ Skip(mem, RIFF_HEADER_SIZE);
+ return 1;
+}
+
+static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
+ const size_t min_size = CHUNK_HEADER_SIZE;
+ MemBuffer* const mem = &dmux->mem_;
+ Frame* frame;
+ ParseStatus status;
+
+ if (dmux->frames_ != NULL) return PARSE_ERROR;
+ if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR;
+ if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA;
+
+ frame = (Frame*)calloc(1, sizeof(*frame));
+ if (frame == NULL) return PARSE_ERROR;
+
+ // For the single image case, we allow parsing of a partial frame. But we need
+ // at least CHUNK_HEADER_SIZE for parsing.
+ status = StoreFrame(1, CHUNK_HEADER_SIZE, &dmux->mem_, frame);
+ if (status != PARSE_ERROR) {
+ const int has_alpha = !!(dmux->feature_flags_ & ALPHA_FLAG);
+ // Clear any alpha when the alpha flag is missing.
+ if (!has_alpha && frame->img_components_[1].size_ > 0) {
+ frame->img_components_[1].offset_ = 0;
+ frame->img_components_[1].size_ = 0;
+ }
+
+ // Use the frame width/height as the canvas values for non-vp8x files.
+ if (!dmux->is_ext_format_ && frame->width_ > 0 && frame->height_ > 0) {
+ dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
+ dmux->canvas_width_ = frame->width_;
+ dmux->canvas_height_ = frame->height_;
+ }
+ AddFrame(dmux, frame);
+ dmux->num_frames_ = 1;
+ } else {
+ free(frame);
+ }
+
+ return status;
+}
+
+static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
+ MemBuffer* const mem = &dmux->mem_;
+ int anim_chunks = 0;
+ uint32_t vp8x_size;
+ ParseStatus status = PARSE_OK;
+
+ if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
+
+ dmux->is_ext_format_ = 1;
+ Skip(mem, TAG_SIZE); // VP8X
+ vp8x_size = ReadLE32(mem);
+ if (vp8x_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
+ if (vp8x_size < VP8X_CHUNK_SIZE) return PARSE_ERROR;
+ vp8x_size += vp8x_size & 1;
+ if (SizeIsInvalid(mem, vp8x_size)) return PARSE_ERROR;
+ if (MemDataSize(mem) < vp8x_size) return PARSE_NEED_MORE_DATA;
+
+ dmux->feature_flags_ = ReadByte(mem);
+ Skip(mem, 3); // Reserved.
+ dmux->canvas_width_ = 1 + ReadLE24s(mem);
+ dmux->canvas_height_ = 1 + ReadLE24s(mem);
+ if (dmux->canvas_width_ * (uint64_t)dmux->canvas_height_ >= MAX_IMAGE_AREA) {
+ return PARSE_ERROR; // image final dimension is too large
+ }
+ Skip(mem, vp8x_size - VP8X_CHUNK_SIZE); // skip any trailing data.
+ dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
+
+ if (SizeIsInvalid(mem, CHUNK_HEADER_SIZE)) return PARSE_ERROR;
+ if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
+
+ do {
+ int store_chunk = 1;
+ const size_t chunk_start_offset = mem->start_;
+ const uint32_t fourcc = ReadLE32(mem);
+ const uint32_t chunk_size = ReadLE32(mem);
+ const uint32_t chunk_size_padded = chunk_size + (chunk_size & 1);
+
+ if (chunk_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
+ if (SizeIsInvalid(mem, chunk_size_padded)) return PARSE_ERROR;
+
+ switch (fourcc) {
+ case MKFOURCC('V', 'P', '8', 'X'): {
+ return PARSE_ERROR;
+ }
+ case MKFOURCC('A', 'L', 'P', 'H'):
+ case MKFOURCC('V', 'P', '8', ' '):
+ case MKFOURCC('V', 'P', '8', 'L'): {
+ Rewind(mem, CHUNK_HEADER_SIZE);
+ status = ParseSingleImage(dmux);
+ break;
+ }
+ case MKFOURCC('A', 'N', 'I', 'M'): {
+ if (chunk_size_padded < ANIM_CHUNK_SIZE) return PARSE_ERROR;
+
+ if (MemDataSize(mem) < chunk_size_padded) {
+ status = PARSE_NEED_MORE_DATA;
+ } else if (anim_chunks == 0) {
+ ++anim_chunks;
+ dmux->bgcolor_ = ReadLE32(mem);
+ dmux->loop_count_ = ReadLE16s(mem);
+ Skip(mem, chunk_size_padded - ANIM_CHUNK_SIZE);
+ } else {
+ store_chunk = 0;
+ goto Skip;
+ }
+ break;
+ }
+ case MKFOURCC('A', 'N', 'M', 'F'): {
+ status = ParseFrame(dmux, chunk_size_padded);
+ break;
+ }
+ case MKFOURCC('F', 'R', 'G', 'M'): {
+ if (dmux->num_frames_ == 0) dmux->num_frames_ = 1;
+ status = ParseFragment(dmux, chunk_size_padded);
+ break;
+ }
+ case MKFOURCC('I', 'C', 'C', 'P'): {
+ store_chunk = !!(dmux->feature_flags_ & ICCP_FLAG);
+ goto Skip;
+ }
+ case MKFOURCC('X', 'M', 'P', ' '): {
+ store_chunk = !!(dmux->feature_flags_ & XMP_FLAG);
+ goto Skip;
+ }
+ case MKFOURCC('E', 'X', 'I', 'F'): {
+ store_chunk = !!(dmux->feature_flags_ & EXIF_FLAG);
+ goto Skip;
+ }
+ Skip:
+ default: {
+ if (chunk_size_padded <= MemDataSize(mem)) {
+ if (store_chunk) {
+ // Store only the chunk header and unpadded size as only the payload
+ // will be returned to the user.
+ if (!StoreChunk(dmux, chunk_start_offset,
+ CHUNK_HEADER_SIZE + chunk_size)) {
+ return PARSE_ERROR;
+ }
+ }
+ Skip(mem, chunk_size_padded);
+ } else {
+ status = PARSE_NEED_MORE_DATA;
+ }
+ }
+ }
+
+ if (mem->start_ == mem->riff_end_) {
+ break;
+ } else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) {
+ status = PARSE_NEED_MORE_DATA;
+ }
+ } while (status == PARSE_OK);
+
+ return status;
+}
+
+// -----------------------------------------------------------------------------
+// Format validation
+
+static int IsValidSimpleFormat(const WebPDemuxer* const dmux) {
+ const Frame* const frame = dmux->frames_;
+ if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
+
+ if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
+ if (dmux->state_ == WEBP_DEMUX_DONE && frame == NULL) return 0;
+
+ if (frame->width_ <= 0 || frame->height_ <= 0) return 0;
+ return 1;
+}
+
+static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
+ const int has_fragments = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
+ const int has_frames = !!(dmux->feature_flags_ & ANIMATION_FLAG);
+ const Frame* f;
+
+ if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
+
+ if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
+ if (dmux->loop_count_ < 0) return 0;
+ if (dmux->state_ == WEBP_DEMUX_DONE && dmux->frames_ == NULL) return 0;
+
+ for (f = dmux->frames_; f != NULL; f = f->next_) {
+ const int cur_frame_set = f->frame_num_;
+ int frame_count = 0, fragment_count = 0;
+
+    // Check frame properties; if the image is composed of fragments, check
+    // that each piece came from a fragment.
+ for (; f != NULL && f->frame_num_ == cur_frame_set; f = f->next_) {
+ const ChunkData* const image = f->img_components_;
+ const ChunkData* const alpha = f->img_components_ + 1;
+
+ if (!has_fragments && f->is_fragment_) return 0;
+ if (!has_frames && f->frame_num_ > 1) return 0;
+ if (f->x_offset_ < 0 || f->y_offset_ < 0) return 0;
+ if (f->complete_) {
+ if (alpha->size_ == 0 && image->size_ == 0) return 0;
+ // Ensure alpha precedes image bitstream.
+ if (alpha->size_ > 0 && alpha->offset_ > image->offset_) {
+ return 0;
+ }
+
+ if (f->width_ <= 0 || f->height_ <= 0) return 0;
+ } else {
+ // Ensure alpha precedes image bitstream.
+ if (alpha->size_ > 0 && image->size_ > 0 &&
+ alpha->offset_ > image->offset_) {
+ return 0;
+ }
+ // There shouldn't be any frames after an incomplete one.
+ if (f->next_ != NULL) return 0;
+ }
+
+ fragment_count += f->is_fragment_;
+ ++frame_count;
+ }
+ if (!has_fragments && frame_count > 1) return 0;
+ if (fragment_count > 0 && frame_count != fragment_count) return 0;
+ if (f == NULL) break;
+ }
+ return 1;
+}
+
+// -----------------------------------------------------------------------------
+// WebPDemuxer object
+
+static void InitDemux(WebPDemuxer* const dmux, const MemBuffer* const mem) {
+ dmux->state_ = WEBP_DEMUX_PARSING_HEADER;
+ dmux->loop_count_ = 1;
+ dmux->bgcolor_ = 0xFFFFFFFF; // White background by default.
+ dmux->canvas_width_ = -1;
+ dmux->canvas_height_ = -1;
+ dmux->mem_ = *mem;
+}
+
+WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
+ WebPDemuxState* state, int version) {
+ const ChunkParser* parser;
+ int partial;
+ ParseStatus status = PARSE_ERROR;
+ MemBuffer mem;
+ WebPDemuxer* dmux;
+
+ if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_DEMUX_ABI_VERSION)) return NULL;
+ if (data == NULL || data->bytes == NULL || data->size == 0) return NULL;
+
+ if (!InitMemBuffer(&mem, data->bytes, data->size)) return NULL;
+ if (!ReadHeader(&mem)) return NULL;
+
+ partial = (mem.buf_size_ < mem.riff_end_);
+ if (!allow_partial && partial) return NULL;
+
+ dmux = (WebPDemuxer*)calloc(1, sizeof(*dmux));
+ if (dmux == NULL) return NULL;
+ InitDemux(dmux, &mem);
+
+ for (parser = kMasterChunks; parser->parse != NULL; ++parser) {
+ if (!memcmp(parser->id, GetBuffer(&dmux->mem_), TAG_SIZE)) {
+ status = parser->parse(dmux);
+ if (status == PARSE_OK) dmux->state_ = WEBP_DEMUX_DONE;
+ if (status == PARSE_NEED_MORE_DATA && !partial) status = PARSE_ERROR;
+ if (status != PARSE_ERROR && !parser->valid(dmux)) status = PARSE_ERROR;
+ break;
+ }
+ }
+ if (state) *state = dmux->state_;
+
+ if (status == PARSE_ERROR) {
+ WebPDemuxDelete(dmux);
+ return NULL;
+ }
+ return dmux;
+}
+
+void WebPDemuxDelete(WebPDemuxer* dmux) {
+ Chunk* c;
+ Frame* f;
+ if (dmux == NULL) return;
+
+ for (f = dmux->frames_; f != NULL;) {
+ Frame* const cur_frame = f;
+ f = f->next_;
+ free(cur_frame);
+ }
+ for (c = dmux->chunks_; c != NULL;) {
+ Chunk* const cur_chunk = c;
+ c = c->next_;
+ free(cur_chunk);
+ }
+ free(dmux);
+}
+
+// -----------------------------------------------------------------------------
+
+uint32_t WebPDemuxGetI(const WebPDemuxer* dmux, WebPFormatFeature feature) {
+ if (dmux == NULL) return 0;
+
+ switch (feature) {
+ case WEBP_FF_FORMAT_FLAGS: return dmux->feature_flags_;
+ case WEBP_FF_CANVAS_WIDTH: return (uint32_t)dmux->canvas_width_;
+ case WEBP_FF_CANVAS_HEIGHT: return (uint32_t)dmux->canvas_height_;
+ case WEBP_FF_LOOP_COUNT: return (uint32_t)dmux->loop_count_;
+ case WEBP_FF_BACKGROUND_COLOR: return dmux->bgcolor_;
+ }
+ return 0;
+}
+
+// -----------------------------------------------------------------------------
+// Frame iteration
+
+// Find the first 'frame_num' frame. There may be multiple such frames in a
+// fragmented frame.
+static const Frame* GetFrame(const WebPDemuxer* const dmux, int frame_num) {
+ const Frame* f;
+ for (f = dmux->frames_; f != NULL; f = f->next_) {
+ if (frame_num == f->frame_num_) break;
+ }
+ return f;
+}
+
+// Returns fragment 'fragment_num' and the total count.
+static const Frame* GetFragment(
+ const Frame* const frame_set, int fragment_num, int* const count) {
+ const int this_frame = frame_set->frame_num_;
+ const Frame* f = frame_set;
+ const Frame* fragment = NULL;
+ int total;
+
+ for (total = 0; f != NULL && f->frame_num_ == this_frame; f = f->next_) {
+ if (++total == fragment_num) fragment = f;
+ }
+ *count = total;
+ return fragment;
+}
+
+static const uint8_t* GetFramePayload(const uint8_t* const mem_buf,
+ const Frame* const frame,
+ size_t* const data_size) {
+ *data_size = 0;
+ if (frame != NULL) {
+ const ChunkData* const image = frame->img_components_;
+ const ChunkData* const alpha = frame->img_components_ + 1;
+ size_t start_offset = image->offset_;
+ *data_size = image->size_;
+
+    // If alpha exists it precedes the image; update the size to allow for
+    // intervening chunks.
+ if (alpha->size_ > 0) {
+ const size_t inter_size = (image->offset_ > 0)
+ ? image->offset_ - (alpha->offset_ + alpha->size_)
+ : 0;
+ start_offset = alpha->offset_;
+ *data_size += alpha->size_ + inter_size;
+ }
+ return mem_buf + start_offset;
+ }
+ return NULL;
+}
+
+// Create a whole 'frame' from VP8 (+ alpha) or lossless.
+static int SynthesizeFrame(const WebPDemuxer* const dmux,
+ const Frame* const first_frame,
+ int fragment_num, WebPIterator* const iter) {
+ const uint8_t* const mem_buf = dmux->mem_.buf_;
+ int num_fragments;
+ size_t payload_size = 0;
+ const Frame* const fragment =
+ GetFragment(first_frame, fragment_num, &num_fragments);
+ const uint8_t* const payload =
+ GetFramePayload(mem_buf, fragment, &payload_size);
+ if (payload == NULL) return 0;
+ assert(first_frame != NULL);
+
+ iter->frame_num = first_frame->frame_num_;
+ iter->num_frames = dmux->num_frames_;
+ iter->fragment_num = fragment_num;
+ iter->num_fragments = num_fragments;
+ iter->x_offset = fragment->x_offset_;
+ iter->y_offset = fragment->y_offset_;
+ iter->width = fragment->width_;
+ iter->height = fragment->height_;
+ iter->duration = fragment->duration_;
+ iter->dispose_method = fragment->dispose_method_;
+ iter->complete = fragment->complete_;
+ iter->fragment.bytes = payload;
+ iter->fragment.size = payload_size;
+ // TODO(jzern): adjust offsets for 'FRGM's embedded in 'ANMF's
+ return 1;
+}
+
+static int SetFrame(int frame_num, WebPIterator* const iter) {
+ const Frame* frame;
+ const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_;
+ if (dmux == NULL || frame_num < 0) return 0;
+ if (frame_num > dmux->num_frames_) return 0;
+ if (frame_num == 0) frame_num = dmux->num_frames_;
+
+ frame = GetFrame(dmux, frame_num);
+ if (frame == NULL) return 0;
+
+ return SynthesizeFrame(dmux, frame, 1, iter);
+}
+
+int WebPDemuxGetFrame(const WebPDemuxer* dmux, int frame, WebPIterator* iter) {
+ if (iter == NULL) return 0;
+
+ memset(iter, 0, sizeof(*iter));
+ iter->private_ = (void*)dmux;
+ return SetFrame(frame, iter);
+}
+
+int WebPDemuxNextFrame(WebPIterator* iter) {
+ if (iter == NULL) return 0;
+ return SetFrame(iter->frame_num + 1, iter);
+}
+
+int WebPDemuxPrevFrame(WebPIterator* iter) {
+ if (iter == NULL) return 0;
+ if (iter->frame_num <= 1) return 0;
+ return SetFrame(iter->frame_num - 1, iter);
+}
+
+int WebPDemuxSelectFragment(WebPIterator* iter, int fragment_num) {
+ if (iter != NULL && iter->private_ != NULL && fragment_num > 0) {
+ const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_;
+ const Frame* const frame = GetFrame(dmux, iter->frame_num);
+ if (frame == NULL) return 0;
+
+ return SynthesizeFrame(dmux, frame, fragment_num, iter);
+ }
+ return 0;
+}
+
+void WebPDemuxReleaseIterator(WebPIterator* iter) {
+ (void)iter;
+}
+
+// -----------------------------------------------------------------------------
+// Chunk iteration
+
+static int ChunkCount(const WebPDemuxer* const dmux, const char fourcc[4]) {
+ const uint8_t* const mem_buf = dmux->mem_.buf_;
+ const Chunk* c;
+ int count = 0;
+ for (c = dmux->chunks_; c != NULL; c = c->next_) {
+ const uint8_t* const header = mem_buf + c->data_.offset_;
+ if (!memcmp(header, fourcc, TAG_SIZE)) ++count;
+ }
+ return count;
+}
+
+static const Chunk* GetChunk(const WebPDemuxer* const dmux,
+ const char fourcc[4], int chunk_num) {
+ const uint8_t* const mem_buf = dmux->mem_.buf_;
+ const Chunk* c;
+ int count = 0;
+ for (c = dmux->chunks_; c != NULL; c = c->next_) {
+ const uint8_t* const header = mem_buf + c->data_.offset_;
+ if (!memcmp(header, fourcc, TAG_SIZE)) ++count;
+ if (count == chunk_num) break;
+ }
+ return c;
+}
+
+static int SetChunk(const char fourcc[4], int chunk_num,
+ WebPChunkIterator* const iter) {
+ const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_;
+ int count;
+
+ if (dmux == NULL || fourcc == NULL || chunk_num < 0) return 0;
+ count = ChunkCount(dmux, fourcc);
+ if (count == 0) return 0;
+ if (chunk_num == 0) chunk_num = count;
+
+ if (chunk_num <= count) {
+ const uint8_t* const mem_buf = dmux->mem_.buf_;
+ const Chunk* const chunk = GetChunk(dmux, fourcc, chunk_num);
+ iter->chunk.bytes = mem_buf + chunk->data_.offset_ + CHUNK_HEADER_SIZE;
+ iter->chunk.size = chunk->data_.size_ - CHUNK_HEADER_SIZE;
+ iter->num_chunks = count;
+ iter->chunk_num = chunk_num;
+ return 1;
+ }
+ return 0;
+}
+
+int WebPDemuxGetChunk(const WebPDemuxer* dmux,
+ const char fourcc[4], int chunk_num,
+ WebPChunkIterator* iter) {
+ if (iter == NULL) return 0;
+
+ memset(iter, 0, sizeof(*iter));
+ iter->private_ = (void*)dmux;
+ return SetChunk(fourcc, chunk_num, iter);
+}
+
+int WebPDemuxNextChunk(WebPChunkIterator* iter) {
+ if (iter != NULL) {
+ const char* const fourcc =
+ (const char*)iter->chunk.bytes - CHUNK_HEADER_SIZE;
+ return SetChunk(fourcc, iter->chunk_num + 1, iter);
+ }
+ return 0;
+}
+
+int WebPDemuxPrevChunk(WebPChunkIterator* iter) {
+ if (iter != NULL && iter->chunk_num > 1) {
+ const char* const fourcc =
+ (const char*)iter->chunk.bytes - CHUNK_HEADER_SIZE;
+ return SetChunk(fourcc, iter->chunk_num - 1, iter);
+ }
+ return 0;
+}
+
+void WebPDemuxReleaseChunkIterator(WebPChunkIterator* iter) {
+ (void)iter;
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
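A usage sketch for the demux API added above, assuming a complete WebP file in 'data'/'data_size' (WebPDemux() is the public wrapper around WebPDemuxInternal() declared in webp/demux.h):

    WebPData webp_data = { data, data_size };
    WebPDemuxer* const dmux = WebPDemux(&webp_data);
    if (dmux != NULL) {
      WebPIterator iter;
      if (WebPDemuxGetFrame(dmux, 1, &iter)) {
        do {
          /* iter.fragment.bytes / iter.fragment.size hold the frame payload. */
        } while (WebPDemuxNextFrame(&iter));
        WebPDemuxReleaseIterator(&iter);
      }
      WebPDemuxDelete(dmux);
    }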
diff --git a/src/dsp/cpu-features.c b/src/dsp/cpu-features.c
new file mode 100644
index 00000000..6a5cd8f1
--- /dev/null
+++ b/src/dsp/cpu-features.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <sys/system_properties.h>
+#ifdef __arm__
+#include <machine/cpu-features.h>
+#endif
+#include <pthread.h>
+#include "cpu-features.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>  /* for memmem(), memchr(), strlen() */
+#include <unistd.h>  /* for read(), close() */
+
+static pthread_once_t g_once = PTHREAD_ONCE_INIT;
+static AndroidCpuFamily g_cpuFamily;
+static uint64_t g_cpuFeatures;
+static int g_cpuCount;
+
+static const int android_cpufeatures_debug = 0;
+
+#ifdef __arm__
+# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_ARM
+#elif defined __i386__
+# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_X86
+#else
+# define DEFAULT_CPU_FAMILY ANDROID_CPU_FAMILY_UNKNOWN
+#endif
+
+#define D(...) \
+ do { \
+ if (android_cpufeatures_debug) { \
+ printf(__VA_ARGS__); fflush(stdout); \
+ } \
+ } while (0)
+
+#ifdef __i386__
+static __inline__ void x86_cpuid(int func, int values[4])
+{
+ int a, b, c, d;
+ /* We need to preserve ebx since we're compiling PIC code */
+ /* this means we can't use "=b" for the second output register */
+ __asm__ __volatile__ ( \
+ "push %%ebx\n"
+ "cpuid\n" \
+ "mov %%ebx, %1\n"
+ "pop %%ebx\n"
+ : "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
+ : "a" (func) \
+ );
+ values[0] = a;
+ values[1] = b;
+ values[2] = c;
+ values[3] = d;
+}
+#endif
+
+/* Read the content of /proc/cpuinfo into a user-provided buffer.
+ * Return the length of the data, or -1 on error. Does *not*
+ * zero-terminate the content. Will not read more
+ * than 'buffsize' bytes.
+ */
+static int
+read_file(const char* pathname, char* buffer, size_t buffsize)
+{
+ int fd, len;
+
+ fd = open(pathname, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ do {
+ len = read(fd, buffer, buffsize);
+ } while (len < 0 && errno == EINTR);
+
+ close(fd);
+
+ return len;
+}
+
+/* Extract the content of the first occurrence of a given field in
+ * the content of /proc/cpuinfo and return it as a heap-allocated
+ * string that must be freed by the caller.
+ *
+ * Return NULL if not found
+ */
+static char*
+extract_cpuinfo_field(char* buffer, int buflen, const char* field)
+{
+ int fieldlen = strlen(field);
+ char* bufend = buffer + buflen;
+ char* result = NULL;
+ int len, ignore;
+ const char *p, *q;
+
+ /* Look for the first field occurrence, and ensure it starts the line.
+ */
+ p = buffer;
+ bufend = buffer + buflen;
+ for (;;) {
+ p = memmem(p, bufend-p, field, fieldlen);
+ if (p == NULL)
+ goto EXIT;
+
+ if (p == buffer || p[-1] == '\n')
+ break;
+
+ p += fieldlen;
+ }
+
+ /* Skip to the first colon followed by a space */
+ p += fieldlen;
+ p = memchr(p, ':', bufend-p);
+ if (p == NULL || p[1] != ' ')
+ goto EXIT;
+
+ /* Find the end of the line */
+ p += 2;
+ q = memchr(p, '\n', bufend-p);
+ if (q == NULL)
+ q = bufend;
+
+ /* Copy the line into a heap-allocated buffer */
+ len = q-p;
+ result = malloc(len+1);
+ if (result == NULL)
+ goto EXIT;
+
+ memcpy(result, p, len);
+ result[len] = '\0';
+
+EXIT:
+ return result;
+}
+
+/* Count the number of occurrences of a given field prefix in /proc/cpuinfo.
+ */
+static int
+count_cpuinfo_field(char* buffer, int buflen, const char* field)
+{
+ int fieldlen = strlen(field);
+ const char* p = buffer;
+ const char* bufend = buffer + buflen;
+ const char* q;
+ int count = 0;
+
+ for (;;) {
+
+ p = memmem(p, bufend-p, field, fieldlen);
+ if (p == NULL)
+ break;
+
+ /* Ensure that the field is at the start of a line */
+ if (p > buffer && p[-1] != '\n') {
+ p += fieldlen;
+ continue;
+ }
+
+
+ /* skip any whitespace */
+ q = p + fieldlen;
+ while (q < bufend && (*q == ' ' || *q == '\t'))
+ q++;
+
+ /* we must have a colon now */
+ if (q < bufend && *q == ':') {
+ count += 1;
+ q ++;
+ }
+ p = q;
+ }
+
+ return count;
+}
+
+/* Like strlen(), but for constant string literals */
+#define STRLEN_CONST(x) (sizeof(x)-1)
+
+
+/* Checks that a space-separated list of items contains one given 'item'.
+ * Returns 1 if found, 0 otherwise.
+ */
+static int
+has_list_item(const char* list, const char* item)
+{
+ const char* p = list;
+ int itemlen = strlen(item);
+
+ if (list == NULL)
+ return 0;
+
+ while (*p) {
+ const char* q;
+
+ /* skip spaces */
+ while (*p == ' ' || *p == '\t')
+ p++;
+
+ /* find end of current list item */
+ q = p;
+ while (*q && *q != ' ' && *q != '\t')
+ q++;
+
+ if (itemlen == q-p && !memcmp(p, item, itemlen))
+ return 1;
+
+ /* skip to next item */
+ p = q;
+ }
+ return 0;
+}
+
+
+static void
+android_cpuInit(void)
+{
+ char cpuinfo[4096];
+ int cpuinfo_len;
+
+ g_cpuFamily = DEFAULT_CPU_FAMILY;
+ g_cpuFeatures = 0;
+ g_cpuCount = 1;
+
+ cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, sizeof cpuinfo);
+ D("cpuinfo_len is (%d):\n%.*s\n", cpuinfo_len,
+ cpuinfo_len >= 0 ? cpuinfo_len : 0, cpuinfo);
+
+ if (cpuinfo_len < 0) /* should not happen */ {
+ return;
+ }
+
+ /* Count the CPU cores, the value may be 0 for single-core CPUs */
+ g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "processor");
+ if (g_cpuCount == 0) {
+ g_cpuCount = count_cpuinfo_field(cpuinfo, cpuinfo_len, "Processor");
+ if (g_cpuCount == 0) {
+ g_cpuCount = 1;
+ }
+ }
+
+ D("found cpuCount = %d\n", g_cpuCount);
+
+#ifdef __ARM_ARCH__
+ {
+ char* features = NULL;
+ char* architecture = NULL;
+
+ /* Extract architecture from the "CPU Architecture" field.
+ * The list is well-known, unlike the output of
+ * the 'Processor' field which can vary greatly.
+ *
+ * See the definition of the 'proc_arch' array in
+ * $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+ * same file.
+ */
+ char* cpuArch = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "CPU architecture");
+
+ if (cpuArch != NULL) {
+ char* end;
+ long archNumber;
+ int hasARMv7 = 0;
+
+ D("found cpuArch = '%s'\n", cpuArch);
+
+ /* read the initial decimal number, ignore the rest */
+ archNumber = strtol(cpuArch, &end, 10);
+
+ /* Here we assume that ARMv8 will be upwards compatible with v7
+ * in the future. Unfortunately, there is no 'Features' field to
+ * indicate that Thumb-2 is supported.
+ */
+ if (end > cpuArch && archNumber >= 7) {
+ hasARMv7 = 1;
+ }
+
+ /* Unfortunately, it seems that certain ARMv6-based CPUs
+ * report an incorrect architecture number of 7!
+ *
+ * See http://code.google.com/p/android/issues/detail?id=10812
+ *
+ * We try to correct this by looking at the 'elf_format'
+             * field reported by the 'Processor' field, which is of the
+             * form "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
+             * an ARMv6 one.
+ */
+ if (hasARMv7) {
+ char* cpuProc = extract_cpuinfo_field(cpuinfo, cpuinfo_len,
+ "Processor");
+ if (cpuProc != NULL) {
+ D("found cpuProc = '%s'\n", cpuProc);
+ if (has_list_item(cpuProc, "(v6l)")) {
+ D("CPU processor and architecture mismatch!!\n");
+ hasARMv7 = 0;
+ }
+ free(cpuProc);
+ }
+ }
+
+ if (hasARMv7) {
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_ARMv7;
+ }
+
+ /* The LDREX / STREX instructions are available from ARMv6 */
+ if (archNumber >= 6) {
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_LDREX_STREX;
+ }
+
+ free(cpuArch);
+ }
+
+ /* Extract the list of CPU features from 'Features' field */
+ char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features");
+
+ if (cpuFeatures != NULL) {
+
+ D("found cpuFeatures = '%s'\n", cpuFeatures);
+
+ if (has_list_item(cpuFeatures, "vfpv3"))
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
+
+ else if (has_list_item(cpuFeatures, "vfpv3d16"))
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_VFPv3;
+
+ if (has_list_item(cpuFeatures, "neon")) {
+            /* Note: certain kernels only report "neon" but not "vfpv3"
+             *       in their features list. However, ARM mandates that
+             *       if NEON is implemented, so is VFPv3, so always set
+             *       both flags.
+             */
+ g_cpuFeatures |= ANDROID_CPU_ARM_FEATURE_NEON |
+ ANDROID_CPU_ARM_FEATURE_VFPv3;
+ }
+ free(cpuFeatures);
+ }
+ }
+#endif /* __ARM_ARCH__ */
+
+#ifdef __i386__
+ g_cpuFamily = ANDROID_CPU_FAMILY_X86;
+
+ int regs[4];
+
+/* According to http://en.wikipedia.org/wiki/CPUID */
+#define VENDOR_INTEL_b 0x756e6547
+#define VENDOR_INTEL_c 0x6c65746e
+#define VENDOR_INTEL_d 0x49656e69
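+/* Note: read as little-endian bytes in EBX/ECX/EDX order, these constants
+ * spell out the "GenuineIntel" vendor string ("Genu" / "ntel" / "ineI").
+ */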
+
+ x86_cpuid(0, regs);
+ int vendorIsIntel = (regs[1] == VENDOR_INTEL_b &&
+ regs[2] == VENDOR_INTEL_c &&
+ regs[3] == VENDOR_INTEL_d);
+
+ x86_cpuid(1, regs);
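+    /* CPUID.1:ECX feature bits (regs[2]): bit 9 = SSSE3,
+     * bit 22 = MOVBE, bit 23 = POPCNT.
+     */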
+ if ((regs[2] & (1 << 9)) != 0) {
+ g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_SSSE3;
+ }
+ if ((regs[2] & (1 << 23)) != 0) {
+ g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_POPCNT;
+ }
+ if (vendorIsIntel && (regs[2] & (1 << 22)) != 0) {
+ g_cpuFeatures |= ANDROID_CPU_X86_FEATURE_MOVBE;
+ }
+#endif
+}
+
+
+AndroidCpuFamily
+android_getCpuFamily(void)
+{
+ pthread_once(&g_once, android_cpuInit);
+ return g_cpuFamily;
+}
+
+
+uint64_t
+android_getCpuFeatures(void)
+{
+ pthread_once(&g_once, android_cpuInit);
+ return g_cpuFeatures;
+}
+
+
+int
+android_getCpuCount(void)
+{
+ pthread_once(&g_once, android_cpuInit);
+ return g_cpuCount;
+}
diff --git a/src/dsp/cpu-features.h b/src/dsp/cpu-features.h
new file mode 100644
index 00000000..f20c0bc4
--- /dev/null
+++ b/src/dsp/cpu-features.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// You can download Android source at
+// http://source.android.com/source/downloading.html
+// Original files are in ndk/sources/android/cpufeatures
+// Revision is Change-Id: I9a0629efba36a6023f05e5f092e7addcc1b7d2a9
+
+#ifndef CPU_FEATURES_H
+#define CPU_FEATURES_H
+
+#include <sys/cdefs.h>
+#include <stdint.h>
+
+__BEGIN_DECLS
+
+typedef enum {
+ ANDROID_CPU_FAMILY_UNKNOWN = 0,
+ ANDROID_CPU_FAMILY_ARM,
+ ANDROID_CPU_FAMILY_X86,
+
+ ANDROID_CPU_FAMILY_MAX /* do not remove */
+
+} AndroidCpuFamily;
+
+/* Return family of the device's CPU */
+extern AndroidCpuFamily android_getCpuFamily(void);
+
+enum {
+ ANDROID_CPU_ARM_FEATURE_ARMv7 = (1 << 0),
+ ANDROID_CPU_ARM_FEATURE_VFPv3 = (1 << 1),
+ ANDROID_CPU_ARM_FEATURE_NEON = (1 << 2),
+ ANDROID_CPU_ARM_FEATURE_LDREX_STREX = (1 << 3),
+};
+
+enum {
+ ANDROID_CPU_X86_FEATURE_SSSE3 = (1 << 0),
+ ANDROID_CPU_X86_FEATURE_POPCNT = (1 << 1),
+ ANDROID_CPU_X86_FEATURE_MOVBE = (1 << 2),
+};
+
+extern uint64_t android_getCpuFeatures(void);
+
+/* Return the number of CPU cores detected on this device. */
+extern int android_getCpuCount(void);
+
+__END_DECLS
+
+#endif /* CPU_FEATURES_H */
diff --git a/src/dsp/cpu.c b/src/dsp/cpu.c
index 2ee7812d..bf9ae0c7 100644
--- a/src/dsp/cpu.c
+++ b/src/dsp/cpu.c
@@ -11,9 +11,9 @@
#include "./dsp.h"
-//#if defined(__ANDROID__)
-//#include <cpu-features.h>
-//#endif
+#if defined(__ANDROID__)
+#include "./cpu-features.h"
+#endif
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
@@ -57,17 +57,17 @@ static int x86CPUInfo(CPUFeature feature) {
return 0;
}
VP8CPUInfo VP8GetCPUInfo = x86CPUInfo;
-//#elif defined(WEBP_ANDROID_NEON)
-//static int AndroidCPUInfo(CPUFeature feature) {
-// const AndroidCpuFamily cpu_family = android_getCpuFamily();
-// const uint64_t cpu_features = android_getCpuFeatures();
-// if (feature == kNEON) {
-// return (cpu_family == ANDROID_CPU_FAMILY_ARM &&
-// 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON));
-// }
-// return 0;
-//}
-//VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo;
+#elif defined(WEBP_ANDROID_NEON)
+static int AndroidCPUInfo(CPUFeature feature) {
+ const AndroidCpuFamily cpu_family = android_getCpuFamily();
+ const uint64_t cpu_features = android_getCpuFeatures();
+ if (feature == kNEON) {
+ return (cpu_family == ANDROID_CPU_FAMILY_ARM &&
+ 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON));
+ }
+ return 0;
+}
+VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo;
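+// Usage sketch: with AndroidCPUInfo installed, callers can gate NEON paths at
+// runtime, e.g.:  if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON)) { ... }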
#elif defined(__ARM_NEON__)
// define a dummy function to enable turning off NEON at runtime by setting
// VP8DecGetCPUInfo = NULL
diff --git a/src/dsp/dec_neon.c b/src/dsp/dec_neon.c
index ec824b79..5d7cff15 100644
--- a/src/dsp/dec_neon.c
+++ b/src/dsp/dec_neon.c
@@ -12,14 +12,14 @@
#include "./dsp.h"
-#if defined(WEBP_USE_NEON)
-
-#include "../dec/vp8i.h"
-
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
+#if defined(WEBP_USE_NEON)
+
+#include "../dec/vp8i.h"
+
#define QRegs "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
@@ -155,6 +155,9 @@ static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
}
}
+//-----------------------------------------------------------------------------
+// Inverse transforms (Paragraph 14.4)
+
static void TransformOneNEON(const int16_t *in, uint8_t *dst) {
const int kBPS = BPS;
const int16_t constants[] = {20091, 17734, 0, 0};
@@ -311,19 +314,92 @@ static void TransformTwoNEON(const int16_t* in, uint8_t* dst, int do_two) {
}
}
+static void TransformWHT(const int16_t* in, int16_t* out) {
+ const int kStep = 32; // The store is only incrementing the pointer as if we
+ // had stored a single byte.
+ __asm__ volatile (
+ // part 1
+ // load data into q0, q1
+ "vld1.16 {q0, q1}, [%[in]] \n"
+
+ "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
+ "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
+ "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
+ "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
+
+ "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
+ "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
+ "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
+ "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
+
+ // Transpose
+ // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
+ // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
+ "vswp d1, d4 \n" // vtrn.64 q0, q2
+ "vswp d3, d6 \n" // vtrn.64 q1, q3
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q2, q3 \n"
+
+ "vmov.s32 q4, #3 \n" // dc = 3
+ "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
+ "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
+ "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
+ "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
+ "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
+
+ "vadd.s32 q0, q6, q7 \n"
+ "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
+ "vadd.s32 q1, q9, q8 \n"
+ "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
+ "vsub.s32 q2, q6, q7 \n"
+ "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
+ "vsub.s32 q3, q9, q8 \n"
+ "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
+
+ // set the results to output
+ "vst1.16 d0[0], [%[out]], %[kStep] \n"
+ "vst1.16 d1[0], [%[out]], %[kStep] \n"
+ "vst1.16 d2[0], [%[out]], %[kStep] \n"
+ "vst1.16 d3[0], [%[out]], %[kStep] \n"
+ "vst1.16 d0[1], [%[out]], %[kStep] \n"
+ "vst1.16 d1[1], [%[out]], %[kStep] \n"
+ "vst1.16 d2[1], [%[out]], %[kStep] \n"
+ "vst1.16 d3[1], [%[out]], %[kStep] \n"
+ "vst1.16 d0[2], [%[out]], %[kStep] \n"
+ "vst1.16 d1[2], [%[out]], %[kStep] \n"
+ "vst1.16 d2[2], [%[out]], %[kStep] \n"
+ "vst1.16 d3[2], [%[out]], %[kStep] \n"
+ "vst1.16 d0[3], [%[out]], %[kStep] \n"
+ "vst1.16 d1[3], [%[out]], %[kStep] \n"
+ "vst1.16 d2[3], [%[out]], %[kStep] \n"
+ "vst1.16 d3[3], [%[out]], %[kStep] \n"
+
+ : [out] "+r"(out) // modified registers
+ : [in] "r"(in), [kStep] "r"(kStep) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4",
+ "q5", "q6", "q7", "q8", "q9" // clobbered
+ );
+}
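+
+// For reference, a plain-C sketch of the same inverse WHT (illustrative; the
+// NEON version above processes all four columns at once and transposes
+// in-register). Output DC values land every 16 coefficients, hence kStep.
+#if 0
+static void TransformWHT_C(const int16_t* in, int16_t* out) {
+  int tmp[16];
+  int i;
+  for (i = 0; i < 4; ++i) {   // vertical pass
+    const int a0 = in[0 + i] + in[12 + i];
+    const int a1 = in[4 + i] + in[ 8 + i];
+    const int a2 = in[4 + i] - in[ 8 + i];
+    const int a3 = in[0 + i] - in[12 + i];
+    tmp[0  + i] = a0 + a1;
+    tmp[8  + i] = a0 - a1;
+    tmp[4  + i] = a3 + a2;
+    tmp[12 + i] = a3 - a2;
+  }
+  for (i = 0; i < 4; ++i) {   // horizontal pass
+    const int dc = tmp[0 + i * 4] + 3;   // +3 for rounding before '>> 3'
+    const int a0 = dc             + tmp[3 + i * 4];
+    const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4];
+    const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4];
+    const int a3 = dc             - tmp[3 + i * 4];
+    out[0]  = (a0 + a1) >> 3;
+    out[16] = (a3 + a2) >> 3;
+    out[32] = (a0 - a1) >> 3;
+    out[48] = (a3 - a2) >> 3;
+    out += 64;
+  }
+}
+#endif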
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+// Entry point
+
extern void VP8DspInitNEON(void);
void VP8DspInitNEON(void) {
+#if defined(WEBP_USE_NEON)
VP8Transform = TransformTwoNEON;
+ VP8TransformWHT = TransformWHT;
VP8SimpleVFilter16 = SimpleVFilter16NEON;
VP8SimpleHFilter16 = SimpleHFilter16NEON;
VP8SimpleVFilter16i = SimpleVFilter16iNEON;
VP8SimpleHFilter16i = SimpleHFilter16iNEON;
+#endif // WEBP_USE_NEON
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-
-#endif // WEBP_USE_NEON
diff --git a/src/dsp/dec_sse2.c b/src/dsp/dec_sse2.c
index 472b68ec..1cac1b84 100644
--- a/src/dsp/dec_sse2.c
+++ b/src/dsp/dec_sse2.c
@@ -12,15 +12,15 @@
#include "./dsp.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#include "../dec/vp8i.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -194,7 +194,7 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Add inverse transform to 'dst' and store.
{
- const __m128i zero = _mm_set1_epi16(0);
+ const __m128i zero = _mm_setzero_si128();
// Load the reference(s).
__m128i dst0, dst1, dst2, dst3;
if (do_two) {
@@ -278,14 +278,14 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) { \
const __m128i zero = _mm_setzero_si128(); \
- const __m128i t1 = MM_ABS(p1, p0); \
- const __m128i t2 = MM_ABS(q1, q0); \
+ const __m128i t_1 = MM_ABS(p1, p0); \
+ const __m128i t_2 = MM_ABS(q1, q0); \
\
const __m128i h = _mm_set1_epi8(hev_thresh); \
- const __m128i t3 = _mm_subs_epu8(t1, h); /* abs(p1 - p0) - hev_tresh */ \
- const __m128i t4 = _mm_subs_epu8(t2, h); /* abs(q1 - q0) - hev_tresh */ \
+  const __m128i t_3 = _mm_subs_epu8(t_1, h); /* abs(p1 - p0) - hev_thresh */ \
+  const __m128i t_4 = _mm_subs_epu8(t_2, h); /* abs(q1 - q0) - hev_thresh */ \
\
- not_hev = _mm_or_si128(t3, t4); \
+ not_hev = _mm_or_si128(t_3, t_4); \
not_hev = _mm_cmpeq_epi8(not_hev, zero); /* not_hev <= t1 && not_hev <= t2 */\
}
@@ -314,13 +314,13 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
-// q = q - a and p = p + a; where a = [(a_hi >> 7), (a_lo >> 7)]
+// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) { \
const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7); \
const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7); \
- const __m128i a = _mm_packs_epi16(a_lo7, a_hi7); \
- pi = _mm_adds_epi8(pi, a); \
- qi = _mm_subs_epi8(qi, a); \
+ const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7); \
+ pi = _mm_adds_epi8(pi, delta); \
+ qi = _mm_subs_epi8(qi, delta); \
}
static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
@@ -876,9 +876,15 @@ static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+// Entry point
+
extern void VP8DspInitSSE2(void);
void VP8DspInitSSE2(void) {
+#if defined(WEBP_USE_SSE2)
VP8Transform = TransformSSE2;
VP8VFilter16 = VFilter16SSE2;
@@ -894,10 +900,9 @@ void VP8DspInitSSE2(void) {
VP8SimpleHFilter16 = SimpleHFilter16SSE2;
VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
+#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-
-#endif // WEBP_USE_SSE2
diff --git a/src/dsp/dsp.h b/src/dsp/dsp.h
index 3aad3095..9c186e55 100644
--- a/src/dsp/dsp.h
+++ b/src/dsp/dsp.h
@@ -29,9 +29,9 @@ extern "C" {
#define WEBP_USE_SSE2
#endif
-//#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
-//#define WEBP_ANDROID_NEON // Android targets that might support NEON
-//#endif
+#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
+#define WEBP_ANDROID_NEON // Android targets that might support NEON
+#endif
#if defined(__ARM_NEON__) || defined(WEBP_ANDROID_NEON)
#define WEBP_USE_NEON
@@ -49,8 +49,6 @@ extern VP8CPUInfo VP8GetCPUInfo;
//------------------------------------------------------------------------------
// Encoding
-int VP8GetAlpha(const int histo[]);
-
// Transforms
// VP8Idct: Does one of two inverse transforms. If do_two is set, the transforms
// will be done for (ref, in, dst) and (ref + 4, in + 16, dst + 4).
@@ -85,10 +83,11 @@ typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16],
int n, const struct VP8Matrix* const mtx);
extern VP8QuantizeBlock VP8EncQuantizeBlock;
-// Compute susceptibility based on DCT-coeff histograms:
-// the higher, the "easier" the macroblock is to compress.
-typedef int (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block);
+// Collect histogram for susceptibility calculation and accumulate in histo[].
+struct VP8Histogram;
+typedef void (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ struct VP8Histogram* const histo);
extern const int VP8DspScan[16 + 4 + 4];
extern VP8CHisto VP8CollectHistogram;
@@ -104,7 +103,7 @@ extern VP8DecIdct2 VP8Transform;
extern VP8DecIdct VP8TransformUV;
extern VP8DecIdct VP8TransformDC;
extern VP8DecIdct VP8TransformDCUV;
-extern void (*VP8TransformWHT)(const int16_t* in, int16_t* out);
+extern VP8WHT VP8TransformWHT;
// *dst is the destination block, with stride BPS. Boundary samples are
// assumed accessible when needed.
@@ -159,6 +158,9 @@ extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
// Initializes SSE2 version of the fancy upsamplers.
void WebPInitUpsamplersSSE2(void);
+// NEON version
+void WebPInitUpsamplersNEON(void);
+
#endif // FANCY_UPSAMPLING
// Point-sampling methods.
@@ -200,6 +202,7 @@ extern void (*WebPApplyAlphaMultiply4444)(
void WebPInitPremultiply(void);
void WebPInitPremultiplySSE2(void); // should not be called directly.
+void WebPInitPremultiplyNEON(void);
//------------------------------------------------------------------------------
diff --git a/src/dsp/enc.c b/src/dsp/enc.c
index 02234564..ae2c830a 100644
--- a/src/dsp/enc.c
+++ b/src/dsp/enc.c
@@ -17,31 +17,18 @@
extern "C" {
#endif
-//------------------------------------------------------------------------------
-// Compute susceptibility based on DCT-coeff histograms:
-// the higher, the "easier" the macroblock is to compress.
-
-static int ClipAlpha(int alpha) {
- return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
+static WEBP_INLINE uint8_t clip_8b(int v) {
+ return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
}
-int VP8GetAlpha(const int histo[MAX_COEFF_THRESH + 1]) {
- int num = 0, den = 0, val = 0;
- int k;
- int alpha;
- // note: changing this loop to avoid the numerous "k + 1" slows things down.
- for (k = 0; k < MAX_COEFF_THRESH; ++k) {
- if (histo[k + 1]) {
- val += histo[k + 1];
- num += val * (k + 1);
- den += (k + 1) * (k + 1);
- }
- }
- // we scale the value to a usable [0..255] range
- alpha = den ? 10 * num / den - 5 : 0;
- return ClipAlpha(alpha);
+static WEBP_INLINE int clip_max(int v, int max) {
+ return (v > max) ? max : v;
}
+//------------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+
const int VP8DspScan[16 + 4 + 4] = {
// Luma
0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
@@ -53,27 +40,23 @@ const int VP8DspScan[16 + 4 + 4] = {
8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V
};
-static int CollectHistogram(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block) {
- int histo[MAX_COEFF_THRESH + 1] = { 0 };
- int16_t out[16];
- int j, k;
+static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ VP8Histogram* const histo) {
+ int j;
for (j = start_block; j < end_block; ++j) {
- VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
+ int k;
+ int16_t out[16];
- // Convert coefficients to bin (within out[]).
- for (k = 0; k < 16; ++k) {
- const int v = abs(out[k]) >> 2;
- out[k] = (v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v;
- }
+ VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
- // Use bin to update histogram.
+ // Convert coefficients to bin.
for (k = 0; k < 16; ++k) {
- histo[out[k]]++;
+ const int v = abs(out[k]) >> 3; // TODO(skal): add rounding?
+ const int clipped_value = clip_max(v, MAX_COEFF_THRESH);
+ histo->distribution[clipped_value]++;
}
}
-
- return VP8GetAlpha(histo);
}
//------------------------------------------------------------------------------
@@ -89,15 +72,12 @@ static void InitTables(void) {
if (!tables_ok) {
int i;
for (i = -255; i <= 255 + 255; ++i) {
- clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i;
+ clip1[255 + i] = clip_8b(i);
}
tables_ok = 1;
}
}
-static WEBP_INLINE uint8_t clip_8b(int v) {
- return (!(v & ~0xff)) ? v : v < 0 ? 0 : 255;
-}
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -154,25 +134,25 @@ static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) {
int i;
int tmp[16];
for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {
- const int d0 = src[0] - ref[0];
+ const int d0 = src[0] - ref[0]; // 9bit dynamic range ([-255,255])
const int d1 = src[1] - ref[1];
const int d2 = src[2] - ref[2];
const int d3 = src[3] - ref[3];
- const int a0 = (d0 + d3) << 3;
- const int a1 = (d1 + d2) << 3;
- const int a2 = (d1 - d2) << 3;
- const int a3 = (d0 - d3) << 3;
- tmp[0 + i * 4] = (a0 + a1);
- tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 14500) >> 12;
- tmp[2 + i * 4] = (a0 - a1);
- tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 7500) >> 12;
+ const int a0 = (d0 + d3); // 10b [-510,510]
+ const int a1 = (d1 + d2);
+ const int a2 = (d1 - d2);
+ const int a3 = (d0 - d3);
+ tmp[0 + i * 4] = (a0 + a1) << 3; // 14b [-8160,8160]
+ tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9; // [-7536,7542]
+ tmp[2 + i * 4] = (a0 - a1) << 3;
+ tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9;
}
for (i = 0; i < 4; ++i) {
- const int a0 = (tmp[0 + i] + tmp[12 + i]);
+ const int a0 = (tmp[0 + i] + tmp[12 + i]); // 15b
const int a1 = (tmp[4 + i] + tmp[ 8 + i]);
const int a2 = (tmp[4 + i] - tmp[ 8 + i]);
const int a3 = (tmp[0 + i] - tmp[12 + i]);
- out[0 + i] = (a0 + a1 + 7) >> 4;
+ out[0 + i] = (a0 + a1 + 7) >> 4; // 12b
out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
out[8 + i] = (a0 - a1 + 7) >> 4;
out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);
@@ -589,30 +569,30 @@ static int TTransform(const uint8_t* in, const uint16_t* w) {
int i;
// horizontal pass
for (i = 0; i < 4; ++i, in += BPS) {
- const int a0 = (in[0] + in[2]) << 2;
- const int a1 = (in[1] + in[3]) << 2;
- const int a2 = (in[1] - in[3]) << 2;
- const int a3 = (in[0] - in[2]) << 2;
- tmp[0 + i * 4] = a0 + a1 + (a0 != 0);
+ const int a0 = in[0] + in[2];
+ const int a1 = in[1] + in[3];
+ const int a2 = in[1] - in[3];
+ const int a3 = in[0] - in[2];
+ tmp[0 + i * 4] = a0 + a1;
tmp[1 + i * 4] = a3 + a2;
tmp[2 + i * 4] = a3 - a2;
tmp[3 + i * 4] = a0 - a1;
}
// vertical pass
for (i = 0; i < 4; ++i, ++w) {
- const int a0 = (tmp[0 + i] + tmp[8 + i]);
- const int a1 = (tmp[4 + i] + tmp[12+ i]);
- const int a2 = (tmp[4 + i] - tmp[12+ i]);
- const int a3 = (tmp[0 + i] - tmp[8 + i]);
+ const int a0 = tmp[0 + i] + tmp[8 + i];
+ const int a1 = tmp[4 + i] + tmp[12+ i];
+ const int a2 = tmp[4 + i] - tmp[12+ i];
+ const int a3 = tmp[0 + i] - tmp[8 + i];
const int b0 = a0 + a1;
const int b1 = a3 + a2;
const int b2 = a3 - a2;
const int b3 = a0 - a1;
- // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
- sum += w[ 0] * ((abs(b0) + 3) >> 3);
- sum += w[ 4] * ((abs(b1) + 3) >> 3);
- sum += w[ 8] * ((abs(b2) + 3) >> 3);
- sum += w[12] * ((abs(b3) + 3) >> 3);
+
+ sum += w[ 0] * abs(b0);
+ sum += w[ 4] * abs(b1);
+ sum += w[ 8] * abs(b2);
+ sum += w[12] * abs(b3);
}
return sum;
}
@@ -621,7 +601,7 @@ static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int sum1 = TTransform(a, w);
const int sum2 = TTransform(b, w);
- return (abs(sum2 - sum1) + 8) >> 4;
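+  // Note: the old '<< 2' input scaling and per-coefficient '(abs(b) + 3) >> 3'
+  // in TTransform, combined with the '(x + 8) >> 4' here, amounted to a net
+  // '>> 5' (modulo rounding); TTransform now returns raw weighted sums, so the
+  // normalization is applied once below.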
+ return abs(sum2 - sum1) >> 5;
}
static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
@@ -706,6 +686,7 @@ VP8QuantizeBlock VP8EncQuantizeBlock;
VP8BlockCopy VP8Copy4x4;
extern void VP8EncDspInitSSE2(void);
+extern void VP8EncDspInitNEON(void);
void VP8EncDspInit(void) {
InitTables();
@@ -734,6 +715,10 @@ void VP8EncDspInit(void) {
if (VP8GetCPUInfo(kSSE2)) {
VP8EncDspInitSSE2();
}
+#elif defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ VP8EncDspInitNEON();
+ }
#endif
}
}
diff --git a/src/dsp/enc_neon.c b/src/dsp/enc_neon.c
new file mode 100644
index 00000000..b5a1fbaf
--- /dev/null
+++ b/src/dsp/enc_neon.c
@@ -0,0 +1,661 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// ARM NEON version of speed-critical encoding functions.
+//
+// adapted from libvpx (http://www.webmproject.org/code/)
+
+#include "./dsp.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#if defined(WEBP_USE_NEON)
+
+#include "../enc/vp8enci.h"
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+// Inverse transform.
+// This code is pretty much the same as TransformOneNEON in the decoder, except
+// for subtraction to *ref. See the comments there for algorithmic explanations.
+static void ITransformOne(const uint8_t* ref,
+ const int16_t* in, uint8_t* dst) {
+ const int kBPS = BPS;
+ const int16_t kC1C2[] = { 20091, 17734, 0, 0 }; // kC1 / (kC2 >> 1) / 0 / 0
+
+ __asm__ volatile (
+ "vld1.16 {q1, q2}, [%[in]] \n"
+ "vld1.16 {d0}, [%[kC1C2]] \n"
+
+ // d2: in[0]
+ // d3: in[8]
+ // d4: in[4]
+ // d5: in[12]
+ "vswp d3, d4 \n"
+
+ // q8 = {in[4], in[12]} * kC1 * 2 >> 16
+ // q9 = {in[4], in[12]} * kC2 >> 16
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ // d22 = a = in[0] + in[8]
+ // d23 = b = in[0] - in[8]
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ // q8 = in[4]/[12] * kC1 >> 16
+ "vshr.s16 q8, q8, #1 \n"
+
+ // Add {in[4], in[12]} back after the multiplication.
+ "vqadd.s16 q8, q2, q8 \n"
+
+ // d20 = c = in[4]*kC2 - in[12]*kC1
+ // d21 = d = in[4]*kC1 + in[12]*kC2
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ // d2 = tmp[0] = a + d
+ // d3 = tmp[1] = b + c
+ // d4 = tmp[2] = b - c
+ // d5 = tmp[3] = a - d
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ "vswp d3, d4 \n"
+
+ // q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
+ // q9 = {tmp[4], tmp[12]} * kC2 >> 16
+ "vqdmulh.s16 q8, q2, d0[0] \n"
+ "vqdmulh.s16 q9, q2, d0[1] \n"
+
+ // d22 = a = tmp[0] + tmp[8]
+ // d23 = b = tmp[0] - tmp[8]
+ "vqadd.s16 d22, d2, d3 \n"
+ "vqsub.s16 d23, d2, d3 \n"
+
+ "vshr.s16 q8, q8, #1 \n"
+ "vqadd.s16 q8, q2, q8 \n"
+
+ // d20 = c = in[4]*kC2 - in[12]*kC1
+ // d21 = d = in[4]*kC1 + in[12]*kC2
+ "vqsub.s16 d20, d18, d17 \n"
+ "vqadd.s16 d21, d19, d16 \n"
+
+ // d2 = tmp[0] = a + d
+ // d3 = tmp[1] = b + c
+ // d4 = tmp[2] = b - c
+ // d5 = tmp[3] = a - d
+ "vqadd.s16 d2, d22, d21 \n"
+ "vqadd.s16 d3, d23, d20 \n"
+ "vqsub.s16 d4, d23, d20 \n"
+ "vqsub.s16 d5, d22, d21 \n"
+
+ "vld1.32 d6[0], [%[ref]], %[kBPS] \n"
+ "vld1.32 d6[1], [%[ref]], %[kBPS] \n"
+ "vld1.32 d7[0], [%[ref]], %[kBPS] \n"
+ "vld1.32 d7[1], [%[ref]], %[kBPS] \n"
+
+ "sub %[ref], %[ref], %[kBPS], lsl #2 \n"
+
+ // (val) + 4 >> 3
+ "vrshr.s16 d2, d2, #3 \n"
+ "vrshr.s16 d3, d3, #3 \n"
+ "vrshr.s16 d4, d4, #3 \n"
+ "vrshr.s16 d5, d5, #3 \n"
+
+ "vzip.16 q1, q2 \n"
+ "vzip.16 q1, q2 \n"
+
+ // Must accumulate before saturating
+ "vmovl.u8 q8, d6 \n"
+ "vmovl.u8 q9, d7 \n"
+
+ "vqadd.s16 q1, q1, q8 \n"
+ "vqadd.s16 q2, q2, q9 \n"
+
+ "vqmovun.s16 d0, q1 \n"
+ "vqmovun.s16 d1, q2 \n"
+
+ "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
+ "vst1.32 d1[1], [%[dst]] \n"
+
+ : [in] "+r"(in), [dst] "+r"(dst) // modified registers
+ : [kBPS] "r"(kBPS), [kC1C2] "r"(kC1C2), [ref] "r"(ref) // constants
+ : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" // clobbered
+ );
+}
+
+static void ITransform(const uint8_t* ref,
+ const int16_t* in, uint8_t* dst, int do_two) {
+ ITransformOne(ref, in, dst);
+ if (do_two) {
+ ITransformOne(ref + 4, in + 16, dst + 4);
+ }
+}
+
+// Same code as dec_neon.c
+static void ITransformWHT(const int16_t* in, int16_t* out) {
+ const int kStep = 32; // The store is only incrementing the pointer as if we
+ // had stored a single byte.
+ __asm__ volatile (
+ // part 1
+ // load data into q0, q1
+ "vld1.16 {q0, q1}, [%[in]] \n"
+
+ "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
+ "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
+ "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
+ "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
+
+ "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
+ "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
+ "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
+ "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
+
+ // Transpose
+ // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
+ // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
+ "vswp d1, d4 \n" // vtrn.64 q0, q2
+ "vswp d3, d6 \n" // vtrn.64 q1, q3
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q2, q3 \n"
+
+ "vmov.s32 q4, #3 \n" // dc = 3
+ "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
+ "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
+ "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
+ "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
+ "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
+
+ "vadd.s32 q0, q6, q7 \n"
+ "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
+ "vadd.s32 q1, q9, q8 \n"
+ "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
+ "vsub.s32 q2, q6, q7 \n"
+ "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
+ "vsub.s32 q3, q9, q8 \n"
+ "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
+
+ // set the results to output
+ "vst1.16 d0[0], [%[out]], %[kStep] \n"
+ "vst1.16 d1[0], [%[out]], %[kStep] \n"
+ "vst1.16 d2[0], [%[out]], %[kStep] \n"
+ "vst1.16 d3[0], [%[out]], %[kStep] \n"
+ "vst1.16 d0[1], [%[out]], %[kStep] \n"
+ "vst1.16 d1[1], [%[out]], %[kStep] \n"
+ "vst1.16 d2[1], [%[out]], %[kStep] \n"
+ "vst1.16 d3[1], [%[out]], %[kStep] \n"
+ "vst1.16 d0[2], [%[out]], %[kStep] \n"
+ "vst1.16 d1[2], [%[out]], %[kStep] \n"
+ "vst1.16 d2[2], [%[out]], %[kStep] \n"
+ "vst1.16 d3[2], [%[out]], %[kStep] \n"
+ "vst1.16 d0[3], [%[out]], %[kStep] \n"
+ "vst1.16 d1[3], [%[out]], %[kStep] \n"
+ "vst1.16 d2[3], [%[out]], %[kStep] \n"
+ "vst1.16 d3[3], [%[out]], %[kStep] \n"
+
+ : [out] "+r"(out) // modified registers
+ : [in] "r"(in), [kStep] "r"(kStep) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4",
+ "q5", "q6", "q7", "q8", "q9" // clobbered
+ );
+}
+
+// Forward transform.
+
+// adapted from vp8/encoder/arm/neon/shortfdct_neon.asm
+static const int16_t kCoeff16[] = {
+ 5352, 5352, 5352, 5352, 2217, 2217, 2217, 2217
+};
+static const int32_t kCoeff32[] = {
+ 1812, 1812, 1812, 1812,
+ 937, 937, 937, 937,
+ 12000, 12000, 12000, 12000,
+ 51000, 51000, 51000, 51000
+};
+
+static void FTransform(const uint8_t* src, const uint8_t* ref,
+ int16_t* out) {
+ const int kBPS = BPS;
+ const uint8_t* src_ptr = src;
+ const uint8_t* ref_ptr = ref;
+ const int16_t* coeff16 = kCoeff16;
+ const int32_t* coeff32 = kCoeff32;
+
+ __asm__ volatile (
+ // load src into q4, q5 in high half
+ "vld1.8 {d8}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d10}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d9}, [%[src_ptr]], %[kBPS] \n"
+ "vld1.8 {d11}, [%[src_ptr]] \n"
+
+ // load ref into q6, q7 in high half
+ "vld1.8 {d12}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d14}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d13}, [%[ref_ptr]], %[kBPS] \n"
+ "vld1.8 {d15}, [%[ref_ptr]] \n"
+
+ // Pack the high values in to q4 and q6
+ "vtrn.32 q4, q5 \n"
+ "vtrn.32 q6, q7 \n"
+
+ // d[0-3] = src - ref
+ "vsubl.u8 q0, d8, d12 \n"
+ "vsubl.u8 q1, d9, d13 \n"
+
+ // load coeff16 into q8(d16=5352, d17=2217)
+ "vld1.16 {q8}, [%[coeff16]] \n"
+
+ // load coeff32 high half into q9 = 1812, q10 = 937
+ "vld1.32 {q9, q10}, [%[coeff32]]! \n"
+
+ // load coeff32 low half into q11=12000, q12=51000
+ "vld1.32 {q11,q12}, [%[coeff32]] \n"
+
+ // part 1
+ // Transpose. Register dN is the same as dN in C
+ "vtrn.32 d0, d2 \n"
+ "vtrn.32 d1, d3 \n"
+ "vtrn.16 d0, d1 \n"
+ "vtrn.16 d2, d3 \n"
+
+ "vadd.s16 d4, d0, d3 \n" // a0 = d0 + d3
+ "vadd.s16 d5, d1, d2 \n" // a1 = d1 + d2
+ "vsub.s16 d6, d1, d2 \n" // a2 = d1 - d2
+ "vsub.s16 d7, d0, d3 \n" // a3 = d0 - d3
+
+ "vadd.s16 d0, d4, d5 \n" // a0 + a1
+ "vshl.s16 d0, d0, #3 \n" // temp[0+i*4] = (a0+a1) << 3
+ "vsub.s16 d2, d4, d5 \n" // a0 - a1
+ "vshl.s16 d2, d2, #3 \n" // (temp[2+i*4] = (a0-a1) << 3
+
+ "vmlal.s16 q9, d7, d16 \n" // a3*5352 + 1812
+ "vmlal.s16 q10, d7, d17 \n" // a3*2217 + 937
+ "vmlal.s16 q9, d6, d17 \n" // a2*2217 + a3*5352 + 1812
+ "vmlsl.s16 q10, d6, d16 \n" // a3*2217 + 937 - a2*5352
+
+ // temp[1+i*4] = (d2*2217 + d3*5352 + 1812) >> 9
+ // temp[3+i*4] = (d3*2217 + 937 - d2*5352) >> 9
+ "vshrn.s32 d1, q9, #9 \n"
+ "vshrn.s32 d3, q10, #9 \n"
+
+ // part 2
+ // transpose d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
+ "vtrn.32 d0, d2 \n"
+ "vtrn.32 d1, d3 \n"
+ "vtrn.16 d0, d1 \n"
+ "vtrn.16 d2, d3 \n"
+
+ "vmov.s16 d26, #7 \n"
+
+ "vadd.s16 d4, d0, d3 \n" // a1 = ip[0] + ip[12]
+ "vadd.s16 d5, d1, d2 \n" // b1 = ip[4] + ip[8]
+ "vsub.s16 d6, d1, d2 \n" // c1 = ip[4] - ip[8]
+ "vadd.s16 d4, d4, d26 \n" // a1 + 7
+ "vsub.s16 d7, d0, d3 \n" // d1 = ip[0] - ip[12]
+
+ "vadd.s16 d0, d4, d5 \n" // op[0] = a1 + b1 + 7
+ "vsub.s16 d2, d4, d5 \n" // op[8] = a1 - b1 + 7
+
+ "vmlal.s16 q11, d7, d16 \n" // d1*5352 + 12000
+ "vmlal.s16 q12, d7, d17 \n" // d1*2217 + 51000
+
+ "vceq.s16 d4, d7, #0 \n"
+
+ "vshr.s16 d0, d0, #4 \n"
+ "vshr.s16 d2, d2, #4 \n"
+
+ "vmlal.s16 q11, d6, d17 \n" // c1*2217 + d1*5352 + 12000
+ "vmlsl.s16 q12, d6, d16 \n" // d1*2217 - c1*5352 + 51000
+
+ "vmvn.s16 d4, d4 \n"
+ // op[4] = (c1*2217 + d1*5352 + 12000)>>16
+ "vshrn.s32 d1, q11, #16 \n"
+ // op[4] += (d1!=0)
+ "vsub.s16 d1, d1, d4 \n"
+ // op[12]= (d1*2217 - c1*5352 + 51000)>>16
+ "vshrn.s32 d3, q12, #16 \n"
+
+ // set result to out array
+ "vst1.16 {q0, q1}, [%[out]] \n"
+ : [src_ptr] "+r"(src_ptr), [ref_ptr] "+r"(ref_ptr),
+ [coeff32] "+r"(coeff32) // modified registers
+ : [kBPS] "r"(kBPS), [coeff16] "r"(coeff16),
+ [out] "r"(out) // constants
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
+ "q10", "q11", "q12", "q13" // clobbered
+ );
+}
+
+static void FTransformWHT(const int16_t* in, int16_t* out) {
+ const int kStep = 32;
+ __asm__ volatile (
+ // d0 = in[0 * 16] , d1 = in[1 * 16]
+ // d2 = in[2 * 16] , d3 = in[3 * 16]
+ "vld1.16 d0[0], [%[in]], %[kStep] \n"
+ "vld1.16 d1[0], [%[in]], %[kStep] \n"
+ "vld1.16 d2[0], [%[in]], %[kStep] \n"
+ "vld1.16 d3[0], [%[in]], %[kStep] \n"
+ "vld1.16 d0[1], [%[in]], %[kStep] \n"
+ "vld1.16 d1[1], [%[in]], %[kStep] \n"
+ "vld1.16 d2[1], [%[in]], %[kStep] \n"
+ "vld1.16 d3[1], [%[in]], %[kStep] \n"
+ "vld1.16 d0[2], [%[in]], %[kStep] \n"
+ "vld1.16 d1[2], [%[in]], %[kStep] \n"
+ "vld1.16 d2[2], [%[in]], %[kStep] \n"
+ "vld1.16 d3[2], [%[in]], %[kStep] \n"
+ "vld1.16 d0[3], [%[in]], %[kStep] \n"
+ "vld1.16 d1[3], [%[in]], %[kStep] \n"
+ "vld1.16 d2[3], [%[in]], %[kStep] \n"
+ "vld1.16 d3[3], [%[in]], %[kStep] \n"
+
+ "vaddl.s16 q2, d0, d2 \n"
+ "vshl.s32 q2, q2, #2 \n" // a0=(in[0*16]+in[2*16])<<2
+ "vaddl.s16 q3, d1, d3 \n"
+ "vshl.s32 q3, q3, #2 \n" // a1=(in[1*16]+in[3*16])<<2
+ "vsubl.s16 q4, d1, d3 \n"
+ "vshl.s32 q4, q4, #2 \n" // a2=(in[1*16]-in[3*16])<<2
+ "vsubl.s16 q5, d0, d2 \n"
+ "vshl.s32 q5, q5, #2 \n" // a3=(in[0*16]-in[2*16])<<2
+
+ "vceq.s32 q10, q2, #0 \n"
+ "vmvn.s32 q10, q10 \n" // (a0 != 0)
+ "vqadd.s32 q6, q2, q3 \n" // (a0 + a1)
+ "vqsub.s32 q6, q6, q10 \n" // (a0 + a1) + (a0 != 0)
+ "vqadd.s32 q7, q5, q4 \n" // a3 + a2
+ "vqsub.s32 q8, q5, q4 \n" // a3 - a2
+ "vqsub.s32 q9, q2, q3 \n" // a0 - a1
+
+ // Transpose
+ // q6 = tmp[0, 1, 2, 3] ; q7 = tmp[ 4, 5, 6, 7]
+ // q8 = tmp[8, 9, 10, 11] ; q9 = tmp[12, 13, 14, 15]
+ "vswp d13, d16 \n" // vtrn.64 q0, q2
+ "vswp d15, d18 \n" // vtrn.64 q1, q3
+ "vtrn.32 q6, q7 \n"
+ "vtrn.32 q8, q9 \n"
+
+ "vqadd.s32 q0, q6, q8 \n" // a0 = tmp[0] + tmp[8]
+ "vqadd.s32 q1, q7, q9 \n" // a1 = tmp[4] + tmp[12]
+ "vqsub.s32 q2, q7, q9 \n" // a2 = tmp[4] - tmp[12]
+ "vqsub.s32 q3, q6, q8 \n" // a3 = tmp[0] - tmp[8]
+
+ "vqadd.s32 q4, q0, q1 \n" // b0 = a0 + a1
+ "vqadd.s32 q5, q3, q2 \n" // b1 = a3 + a2
+ "vqsub.s32 q6, q3, q2 \n" // b2 = a3 - a2
+ "vqsub.s32 q7, q0, q1 \n" // b3 = a0 - a1
+
+ "vmov.s32 q0, #3 \n" // q0 = 3
+
+ "vcgt.s32 q1, q4, #0 \n" // (b0>0)
+ "vqsub.s32 q2, q4, q1 \n" // (b0+(b0>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b0+(b0>0)+3)
+ "vshrn.s32 d18, q3, #3 \n" // (b0+(b0>0)+3) >> 3
+
+ "vcgt.s32 q1, q5, #0 \n" // (b1>0)
+ "vqsub.s32 q2, q5, q1 \n" // (b1+(b1>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b1+(b1>0)+3)
+ "vshrn.s32 d19, q3, #3 \n" // (b1+(b1>0)+3) >> 3
+
+ "vcgt.s32 q1, q6, #0 \n" // (b2>0)
+ "vqsub.s32 q2, q6, q1 \n" // (b2+(b2>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b2+(b2>0)+3)
+ "vshrn.s32 d20, q3, #3 \n" // (b2+(b2>0)+3) >> 3
+
+ "vcgt.s32 q1, q7, #0 \n" // (b3>0)
+ "vqsub.s32 q2, q7, q1 \n" // (b3+(b3>0))
+ "vqadd.s32 q3, q2, q0 \n" // (b3+(b3>0)+3)
+ "vshrn.s32 d21, q3, #3 \n" // (b3+(b3>0)+3) >> 3
+
+ "vst1.16 {q9, q10}, [%[out]] \n"
+
+ : [in] "+r"(in)
+ : [kStep] "r"(kStep), [out] "r"(out)
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5",
+ "q6", "q7", "q8", "q9", "q10" // clobbered
+  );
+}
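+
+// For reference, a plain-C sketch matching the comments above (illustrative;
+// in[] is read with a stride of 16 coefficients, i.e. kStep bytes):
+#if 0
+static void FTransformWHT_C(const int16_t* in, int16_t* out) {
+  int tmp[16];
+  int i;
+  for (i = 0; i < 4; ++i, in += 64) {
+    const int a0 = (in[0 * 16] + in[2 * 16]) << 2;
+    const int a1 = (in[1 * 16] + in[3 * 16]) << 2;
+    const int a2 = (in[1 * 16] - in[3 * 16]) << 2;
+    const int a3 = (in[0 * 16] - in[2 * 16]) << 2;
+    tmp[0 + i * 4] = (a0 + a1) + (a0 != 0);
+    tmp[1 + i * 4] = a3 + a2;
+    tmp[2 + i * 4] = a3 - a2;
+    tmp[3 + i * 4] = a0 - a1;
+  }
+  for (i = 0; i < 4; ++i) {
+    const int a0 = (tmp[0 + i] + tmp[8 + i]);
+    const int a1 = (tmp[4 + i] + tmp[12 + i]);
+    const int a2 = (tmp[4 + i] - tmp[12 + i]);
+    const int a3 = (tmp[0 + i] - tmp[8 + i]);
+    const int b0 = a0 + a1;
+    const int b1 = a3 + a2;
+    const int b2 = a3 - a2;
+    const int b3 = a0 - a1;
+    out[ 0 + i] = (b0 + (b0 > 0) + 3) >> 3;
+    out[ 4 + i] = (b1 + (b1 > 0) + 3) >> 3;
+    out[ 8 + i] = (b2 + (b2 > 0) + 3) >> 3;
+    out[12 + i] = (b3 + (b3 > 0) + 3) >> 3;
+  }
+}
+#endif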
+
+//------------------------------------------------------------------------------
+// Texture distortion
+//
+// We try to match the spectral content (weighted) between source and
+// reconstructed samples.
+
+// Hadamard transform
+// Returns the weighted sum of the absolute value of transformed coefficients.
+// (The C version uses a separate TTransform helper; here the whole transform
+// is done in a single inline-assembly block.)
+static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ const int kBPS = BPS;
+ const uint8_t* A = a;
+ const uint8_t* B = b;
+ const uint16_t* W = w;
+ int sum;
+ __asm__ volatile (
+ "vld1.32 d0[0], [%[a]], %[kBPS] \n"
+ "vld1.32 d0[1], [%[a]], %[kBPS] \n"
+ "vld1.32 d2[0], [%[a]], %[kBPS] \n"
+ "vld1.32 d2[1], [%[a]] \n"
+
+ "vld1.32 d1[0], [%[b]], %[kBPS] \n"
+ "vld1.32 d1[1], [%[b]], %[kBPS] \n"
+ "vld1.32 d3[0], [%[b]], %[kBPS] \n"
+ "vld1.32 d3[1], [%[b]] \n"
+
+ // a d0/d2, b d1/d3
+ // d0/d1: 01 01 01 01
+ // d2/d3: 23 23 23 23
+ // But: it goes 01 45 23 67
+ // Notice the middle values are transposed
+ "vtrn.16 q0, q1 \n"
+
+ // {a0, a1} = {in[0] + in[2], in[1] + in[3]}
+ "vaddl.u8 q2, d0, d2 \n"
+ "vaddl.u8 q10, d1, d3 \n"
+ // {a3, a2} = {in[0] - in[2], in[1] - in[3]}
+ "vsubl.u8 q3, d0, d2 \n"
+ "vsubl.u8 q11, d1, d3 \n"
+
+ // tmp[0] = a0 + a1
+ "vpaddl.s16 q0, q2 \n"
+ "vpaddl.s16 q8, q10 \n"
+
+ // tmp[1] = a3 + a2
+ "vpaddl.s16 q1, q3 \n"
+ "vpaddl.s16 q9, q11 \n"
+
+ // No pair subtract
+ // q2 = {a0, a3}
+ // q3 = {a1, a2}
+ "vtrn.16 q2, q3 \n"
+ "vtrn.16 q10, q11 \n"
+
+ // {tmp[3], tmp[2]} = {a0 - a1, a3 - a2}
+ "vsubl.s16 q12, d4, d6 \n"
+ "vsubl.s16 q13, d5, d7 \n"
+ "vsubl.s16 q14, d20, d22 \n"
+ "vsubl.s16 q15, d21, d23 \n"
+
+ // separate tmp[3] and tmp[2]
+ // q12 = tmp[3]
+ // q13 = tmp[2]
+ "vtrn.32 q12, q13 \n"
+ "vtrn.32 q14, q15 \n"
+
+ // Transpose tmp for a
+ "vswp d1, d26 \n" // vtrn.64
+ "vswp d3, d24 \n" // vtrn.64
+ "vtrn.32 q0, q1 \n"
+ "vtrn.32 q13, q12 \n"
+
+ // Transpose tmp for b
+ "vswp d17, d30 \n" // vtrn.64
+ "vswp d19, d28 \n" // vtrn.64
+ "vtrn.32 q8, q9 \n"
+ "vtrn.32 q15, q14 \n"
+
+ // The first Q register is a, the second b.
+ // q0/8 tmp[0-3]
+ // q13/15 tmp[4-7]
+ // q1/9 tmp[8-11]
+ // q12/14 tmp[12-15]
+
+ // These are still in 01 45 23 67 order. We fix it easily in the addition
+    // case but the subtraction propagates them.
+ "vswp d3, d27 \n"
+ "vswp d19, d31 \n"
+
+ // a0 = tmp[0] + tmp[8]
+ "vadd.s32 q2, q0, q1 \n"
+ "vadd.s32 q3, q8, q9 \n"
+
+ // a1 = tmp[4] + tmp[12]
+ "vadd.s32 q10, q13, q12 \n"
+ "vadd.s32 q11, q15, q14 \n"
+
+ // a2 = tmp[4] - tmp[12]
+ "vsub.s32 q13, q13, q12 \n"
+ "vsub.s32 q15, q15, q14 \n"
+
+ // a3 = tmp[0] - tmp[8]
+ "vsub.s32 q0, q0, q1 \n"
+ "vsub.s32 q8, q8, q9 \n"
+
+ // b0 = a0 + a1
+ "vadd.s32 q1, q2, q10 \n"
+ "vadd.s32 q9, q3, q11 \n"
+
+ // b1 = a3 + a2
+ "vadd.s32 q12, q0, q13 \n"
+ "vadd.s32 q14, q8, q15 \n"
+
+ // b2 = a3 - a2
+ "vsub.s32 q0, q0, q13 \n"
+ "vsub.s32 q8, q8, q15 \n"
+
+ // b3 = a0 - a1
+ "vsub.s32 q2, q2, q10 \n"
+ "vsub.s32 q3, q3, q11 \n"
+
+ "vld1.64 {q10, q11}, [%[w]] \n"
+
+ // abs(b0)
+ "vabs.s32 q1, q1 \n"
+ "vabs.s32 q9, q9 \n"
+ // abs(b1)
+ "vabs.s32 q12, q12 \n"
+ "vabs.s32 q14, q14 \n"
+ // abs(b2)
+ "vabs.s32 q0, q0 \n"
+ "vabs.s32 q8, q8 \n"
+ // abs(b3)
+ "vabs.s32 q2, q2 \n"
+ "vabs.s32 q3, q3 \n"
+
+ // expand w before using.
+ "vmovl.u16 q13, d20 \n"
+ "vmovl.u16 q15, d21 \n"
+
+ // w[0] * abs(b0)
+ "vmul.u32 q1, q1, q13 \n"
+ "vmul.u32 q9, q9, q13 \n"
+
+ // w[4] * abs(b1)
+ "vmla.u32 q1, q12, q15 \n"
+ "vmla.u32 q9, q14, q15 \n"
+
+ // expand w before using.
+ "vmovl.u16 q13, d22 \n"
+ "vmovl.u16 q15, d23 \n"
+
+    // w[8] * abs(b2)
+ "vmla.u32 q1, q0, q13 \n"
+ "vmla.u32 q9, q8, q13 \n"
+
+    // w[12] * abs(b3)
+ "vmla.u32 q1, q2, q15 \n"
+ "vmla.u32 q9, q3, q15 \n"
+
+ // Sum the arrays
+ "vpaddl.u32 q1, q1 \n"
+ "vpaddl.u32 q9, q9 \n"
+ "vadd.u64 d2, d3 \n"
+ "vadd.u64 d18, d19 \n"
+
+ // Hadamard transform needs 4 bits of extra precision (2 bits in each
+    // direction) of dynamic range. Weights w[] are at most 16 bits, so the
+    // maximum precision per coefficient is 8 bits of input + 4 bits of
+    // Hadamard transform + 16 bits for w[] + 2 bits of abs() summation.
+ //
+ // This uses a maximum of 31 bits (signed). Discarding the top 32 bits is
+ // A-OK.
+
+ // sum2 - sum1
+ "vsub.u32 d0, d2, d18 \n"
+ // abs(sum2 - sum1)
+ "vabs.s32 d0, d0 \n"
+ // abs(sum2 - sum1) >> 5
+ "vshr.u32 d0, #5 \n"
+
+ // It would be better to move the value straight into r0 but I'm not
+ // entirely sure how this works with inline assembly.
+ "vmov.32 %[sum], d0[0] \n"
+
+ : [sum] "=r"(sum), [a] "+r"(A), [b] "+r"(B), [w] "+r"(W)
+ : [kBPS] "r"(kBPS)
+ : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
+ "q10", "q11", "q12", "q13", "q14", "q15" // clobbered
+  );
+
+ return sum;
+}
+
+static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ int D = 0;
+ int x, y;
+ for (y = 0; y < 16 * BPS; y += 4 * BPS) {
+ for (x = 0; x < 16; x += 4) {
+ D += Disto4x4(a + x + y, b + x + y, w);
+ }
+ }
+ return D;
+}
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspInitNEON(void);
+
+void VP8EncDspInitNEON(void) {
+#if defined(WEBP_USE_NEON)
+ VP8ITransform = ITransform;
+ VP8FTransform = FTransform;
+
+ VP8ITransformWHT = ITransformWHT;
+ VP8FTransformWHT = FTransformWHT;
+
+ VP8TDisto4x4 = Disto4x4;
+ VP8TDisto16x16 = Disto16x16;
+#endif // WEBP_USE_NEON
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/dsp/enc_sse2.c b/src/dsp/enc_sse2.c
index b046761d..c4148b56 100644
--- a/src/dsp/enc_sse2.c
+++ b/src/dsp/enc_sse2.c
@@ -11,27 +11,58 @@
#include "./dsp.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#if defined(WEBP_USE_SSE2)
#include <stdlib.h> // for abs()
#include <emmintrin.h>
#include "../enc/vp8enci.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
+//------------------------------------------------------------------------------
+// Quite useful macro for debugging. Left here for convenience.
+
+#if 0
+#include <stdio.h>
+static void PrintReg(const __m128i r, const char* const name, int size) {
+ int n;
+ union {
+ __m128i r;
+ uint8_t i8[16];
+ uint16_t i16[8];
+ uint32_t i32[4];
+ uint64_t i64[2];
+ } tmp;
+ tmp.r = r;
+ printf("%s\t: ", name);
+ if (size == 8) {
+ for (n = 0; n < 16; ++n) printf("%.2x ", tmp.i8[n]);
+ } else if (size == 16) {
+ for (n = 0; n < 8; ++n) printf("%.4x ", tmp.i16[n]);
+ } else if (size == 32) {
+ for (n = 0; n < 4; ++n) printf("%.8x ", tmp.i32[n]);
+ } else {
+ for (n = 0; n < 2; ++n) printf("%.16lx ", tmp.i64[n]);
+ }
+ printf("\n");
+}
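+// Example (illustrative): 'PrintReg(v01, "v01", 16)' would dump the eight
+// 16-bit lanes of v01 in hex.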
#endif
//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
-static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block) {
- int histo[MAX_COEFF_THRESH + 1] = { 0 };
- int16_t out[16];
- int j, k;
+static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block,
+ VP8Histogram* const histo) {
const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
+ int j;
for (j = start_block; j < end_block; ++j) {
+ int16_t out[16];
+ int k;
+
VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
// Convert coefficients to bin (within out[]).
@@ -47,9 +78,9 @@ static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
const __m128i xor1 = _mm_xor_si128(out1, sign1);
const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
- // v = abs(out) >> 2
- const __m128i v0 = _mm_srai_epi16(abs0, 2);
- const __m128i v1 = _mm_srai_epi16(abs1, 2);
+ // v = abs(out) >> 3
+ const __m128i v0 = _mm_srai_epi16(abs0, 3);
+ const __m128i v1 = _mm_srai_epi16(abs1, 3);
// bin = min(v, MAX_COEFF_THRESH)
const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
@@ -58,13 +89,11 @@ static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
_mm_storeu_si128((__m128i*)&out[8], bin1);
}
- // Use bin to update histogram.
+ // Convert coefficients to bin.
for (k = 0; k < 16; ++k) {
- histo[out[k]]++;
+ histo->distribution[out[k]]++;
}
}
-
- return VP8GetAlpha(histo);
}
//------------------------------------------------------------------------------
@@ -243,7 +272,7 @@ static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
// Add inverse transform to 'ref' and store.
{
- const __m128i zero = _mm_set1_epi16(0);
+ const __m128i zero = _mm_setzero_si128();
// Load the reference(s).
__m128i ref0, ref1, ref2, ref3;
if (do_two) {
@@ -295,17 +324,23 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
int16_t* out) {
const __m128i zero = _mm_setzero_si128();
const __m128i seven = _mm_set1_epi16(7);
- const __m128i k7500 = _mm_set1_epi32(7500);
- const __m128i k14500 = _mm_set1_epi32(14500);
+ const __m128i k937 = _mm_set1_epi32(937);
+ const __m128i k1812 = _mm_set1_epi32(1812);
const __m128i k51000 = _mm_set1_epi32(51000);
const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217,
5352, 2217, 5352, 2217);
const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
2217, -5352, 2217, -5352);
-
+ const __m128i k88p = _mm_set_epi16(8, 8, 8, 8, 8, 8, 8, 8);
+ const __m128i k88m = _mm_set_epi16(-8, 8, -8, 8, -8, 8, -8, 8);
+ const __m128i k5352_2217p = _mm_set_epi16(2217, 5352, 2217, 5352,
+ 2217, 5352, 2217, 5352);
+ const __m128i k5352_2217m = _mm_set_epi16(-5352, 2217, -5352, 2217,
+ -5352, 2217, -5352, 2217);
__m128i v01, v32;
+
// Difference between src and ref and initial transpose.
{
// Load src and convert to 16b.
@@ -326,73 +361,52 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
- // Compute difference.
+ // Compute difference. -> 00 01 02 03 00 00 00 00
const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);
- // Transpose.
+
+ // Unpack and shuffle
// 00 01 02 03 0 0 0 0
// 10 11 12 13 0 0 0 0
// 20 21 22 23 0 0 0 0
// 30 31 32 33 0 0 0 0
- const __m128i transpose0_0 = _mm_unpacklo_epi16(diff0, diff1);
- const __m128i transpose0_1 = _mm_unpacklo_epi16(diff2, diff3);
- // 00 10 01 11 02 12 03 13
- // 20 30 21 31 22 32 23 33
- const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
- v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
- v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
- // a02 a12 a22 a32 a03 a13 a23 a33
- // a00 a10 a20 a30 a01 a11 a21 a31
- // a03 a13 a23 a33 a02 a12 a22 a32
- }
-
- // First pass and subsequent transpose.
- {
- // Same operations are done on the (0,3) and (1,2) pairs.
- // b0 = (a0 + a3) << 3
- // b1 = (a1 + a2) << 3
- // b3 = (a0 - a3) << 3
- // b2 = (a1 - a2) << 3
- const __m128i a01 = _mm_add_epi16(v01, v32);
- const __m128i a32 = _mm_sub_epi16(v01, v32);
- const __m128i b01 = _mm_slli_epi16(a01, 3);
- const __m128i b32 = _mm_slli_epi16(a32, 3);
- const __m128i b11 = _mm_unpackhi_epi64(b01, b01);
- const __m128i b22 = _mm_unpackhi_epi64(b32, b32);
-
- // e0 = b0 + b1
- // e2 = b0 - b1
- const __m128i e0 = _mm_add_epi16(b01, b11);
- const __m128i e2 = _mm_sub_epi16(b01, b11);
- const __m128i e02 = _mm_unpacklo_epi64(e0, e2);
-
- // e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12
- // e3 = (b3 * 2217 - b2 * 5352 + 7500) >> 12
- const __m128i b23 = _mm_unpacklo_epi16(b22, b32);
- const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
- const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
- const __m128i d1 = _mm_add_epi32(c1, k14500);
- const __m128i d3 = _mm_add_epi32(c3, k7500);
- const __m128i e1 = _mm_srai_epi32(d1, 12);
- const __m128i e3 = _mm_srai_epi32(d3, 12);
- const __m128i e13 = _mm_packs_epi32(e1, e3);
-
- // Transpose.
- // 00 01 02 03 20 21 22 23
- // 10 11 12 13 30 31 32 33
- const __m128i transpose0_0 = _mm_unpacklo_epi16(e02, e13);
- const __m128i transpose0_1 = _mm_unpackhi_epi16(e02, e13);
- // 00 10 01 11 02 12 03 13
- // 20 30 21 31 22 32 23 33
- const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
- v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
- v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
- // 02 12 22 32 03 13 23 33
- // 00 10 20 30 01 11 21 31
- // 03 13 23 33 02 12 22 32
+ const __m128i shuf01 = _mm_unpacklo_epi32(diff0, diff1);
+ const __m128i shuf23 = _mm_unpacklo_epi32(diff2, diff3);
+ // 00 01 10 11 02 03 12 13
+ // 20 21 30 31 22 23 32 33
+ const __m128i shuf01_p =
+ _mm_shufflehi_epi16(shuf01, _MM_SHUFFLE(2, 3, 0, 1));
+ const __m128i shuf23_p =
+ _mm_shufflehi_epi16(shuf23, _MM_SHUFFLE(2, 3, 0, 1));
+ // 00 01 10 11 03 02 13 12
+ // 20 21 30 31 23 22 33 32
+ const __m128i s01 = _mm_unpacklo_epi64(shuf01_p, shuf23_p);
+ const __m128i s32 = _mm_unpackhi_epi64(shuf01_p, shuf23_p);
+ // 00 01 10 11 20 21 30 31
+ // 03 02 13 12 23 22 33 32
+ const __m128i a01 = _mm_add_epi16(s01, s32);
+ const __m128i a32 = _mm_sub_epi16(s01, s32);
+ // [d0 + d3 | d1 + d2 | ...] = [a0 a1 | a0' a1' | ... ]
+ // [d0 - d3 | d1 - d2 | ...] = [a3 a2 | a3' a2' | ... ]
+
+ const __m128i tmp0 = _mm_madd_epi16(a01, k88p); // [ (a0 + a1) << 3, ... ]
+ const __m128i tmp2 = _mm_madd_epi16(a01, k88m); // [ (a0 - a1) << 3, ... ]
+ const __m128i tmp1_1 = _mm_madd_epi16(a32, k5352_2217p);
+ const __m128i tmp3_1 = _mm_madd_epi16(a32, k5352_2217m);
+ const __m128i tmp1_2 = _mm_add_epi32(tmp1_1, k1812);
+ const __m128i tmp3_2 = _mm_add_epi32(tmp3_1, k937);
+ const __m128i tmp1 = _mm_srai_epi32(tmp1_2, 9);
+ const __m128i tmp3 = _mm_srai_epi32(tmp3_2, 9);
+ const __m128i s03 = _mm_packs_epi32(tmp0, tmp2);
+ const __m128i s12 = _mm_packs_epi32(tmp1, tmp3);
+ const __m128i s_lo = _mm_unpacklo_epi16(s03, s12); // 0 1 0 1 0 1...
+ const __m128i s_hi = _mm_unpackhi_epi16(s03, s12); // 2 3 2 3 2 3
+ const __m128i v23 = _mm_unpackhi_epi32(s_lo, s_hi);
+ v01 = _mm_unpacklo_epi32(s_lo, s_hi);
+ v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); // 3 2 3 2 3 2..
}
// Second pass
@@ -406,13 +420,12 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i a32 = _mm_sub_epi16(v01, v32);
const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
const __m128i a22 = _mm_unpackhi_epi64(a32, a32);
+ const __m128i a01_plus_7 = _mm_add_epi16(a01, seven);
// d0 = (a0 + a1 + 7) >> 4;
// d2 = (a0 - a1 + 7) >> 4;
- const __m128i b0 = _mm_add_epi16(a01, a11);
- const __m128i b2 = _mm_sub_epi16(a01, a11);
- const __m128i c0 = _mm_add_epi16(b0, seven);
- const __m128i c2 = _mm_add_epi16(b2, seven);
+ const __m128i c0 = _mm_add_epi16(a01_plus_7, a11);
+ const __m128i c2 = _mm_sub_epi16(a01_plus_7, a11);
const __m128i d0 = _mm_srai_epi16(c0, 4);
const __m128i d2 = _mm_srai_epi16(c2, 4);
@@ -430,6 +443,7 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
// f1 = f1 + (a3 != 0);
// The compare will return (0xffff, 0) for (==0, !=0). To turn that into the
// desired (0, 1), we add one earlier through k12000_plus_one.
+ // -> f1 = f1 + 1 - (a3 == 0)
const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));
_mm_storel_epi64((__m128i*)&out[ 0], d0);
@@ -442,10 +456,101 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
//------------------------------------------------------------------------------
// Metric
+static int SSE_Nx4SSE2(const uint8_t* a, const uint8_t* b,
+ int num_quads, int do_16) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i sum1 = zero;
+ __m128i sum2 = zero;
+
+ while (num_quads-- > 0) {
+ // Note: for the !do_16 case, we read 16 pixels instead of 8 but that's ok,
+ // thanks to buffer over-allocation to that effect.
+ const __m128i a0 = _mm_loadu_si128((__m128i*)&a[BPS * 0]);
+ const __m128i a1 = _mm_loadu_si128((__m128i*)&a[BPS * 1]);
+ const __m128i a2 = _mm_loadu_si128((__m128i*)&a[BPS * 2]);
+ const __m128i a3 = _mm_loadu_si128((__m128i*)&a[BPS * 3]);
+ const __m128i b0 = _mm_loadu_si128((__m128i*)&b[BPS * 0]);
+ const __m128i b1 = _mm_loadu_si128((__m128i*)&b[BPS * 1]);
+ const __m128i b2 = _mm_loadu_si128((__m128i*)&b[BPS * 2]);
+ const __m128i b3 = _mm_loadu_si128((__m128i*)&b[BPS * 3]);
+
+ // compute clip0(a-b) and clip0(b-a)
+ const __m128i a0p = _mm_subs_epu8(a0, b0);
+ const __m128i a0m = _mm_subs_epu8(b0, a0);
+ const __m128i a1p = _mm_subs_epu8(a1, b1);
+ const __m128i a1m = _mm_subs_epu8(b1, a1);
+ const __m128i a2p = _mm_subs_epu8(a2, b2);
+ const __m128i a2m = _mm_subs_epu8(b2, a2);
+ const __m128i a3p = _mm_subs_epu8(a3, b3);
+ const __m128i a3m = _mm_subs_epu8(b3, a3);
+
+ // compute |a-b| with 8b arithmetic as clip0(a-b) | clip0(b-a)
+ const __m128i diff0 = _mm_or_si128(a0p, a0m);
+ const __m128i diff1 = _mm_or_si128(a1p, a1m);
+ const __m128i diff2 = _mm_or_si128(a2p, a2m);
+ const __m128i diff3 = _mm_or_si128(a3p, a3m);
+
+ // unpack (only four operations, instead of eight)
+ const __m128i low0 = _mm_unpacklo_epi8(diff0, zero);
+ const __m128i low1 = _mm_unpacklo_epi8(diff1, zero);
+ const __m128i low2 = _mm_unpacklo_epi8(diff2, zero);
+ const __m128i low3 = _mm_unpacklo_epi8(diff3, zero);
+
+ // multiply with self
+ const __m128i low_madd0 = _mm_madd_epi16(low0, low0);
+ const __m128i low_madd1 = _mm_madd_epi16(low1, low1);
+ const __m128i low_madd2 = _mm_madd_epi16(low2, low2);
+ const __m128i low_madd3 = _mm_madd_epi16(low3, low3);
+
+ // collect in a cascading way
+ const __m128i low_sum0 = _mm_add_epi32(low_madd0, low_madd1);
+ const __m128i low_sum1 = _mm_add_epi32(low_madd2, low_madd3);
+ sum1 = _mm_add_epi32(sum1, low_sum0);
+ sum2 = _mm_add_epi32(sum2, low_sum1);
+
+ if (do_16) { // if necessary, process the higher 8 bytes similarly
+ const __m128i hi0 = _mm_unpackhi_epi8(diff0, zero);
+ const __m128i hi1 = _mm_unpackhi_epi8(diff1, zero);
+ const __m128i hi2 = _mm_unpackhi_epi8(diff2, zero);
+ const __m128i hi3 = _mm_unpackhi_epi8(diff3, zero);
+
+ const __m128i hi_madd0 = _mm_madd_epi16(hi0, hi0);
+ const __m128i hi_madd1 = _mm_madd_epi16(hi1, hi1);
+ const __m128i hi_madd2 = _mm_madd_epi16(hi2, hi2);
+ const __m128i hi_madd3 = _mm_madd_epi16(hi3, hi3);
+ const __m128i hi_sum0 = _mm_add_epi32(hi_madd0, hi_madd1);
+ const __m128i hi_sum1 = _mm_add_epi32(hi_madd2, hi_madd3);
+ sum1 = _mm_add_epi32(sum1, hi_sum0);
+ sum2 = _mm_add_epi32(sum2, hi_sum1);
+ }
+ a += 4 * BPS;
+ b += 4 * BPS;
+ }
+ {
+ int32_t tmp[4];
+ const __m128i sum = _mm_add_epi32(sum1, sum2);
+ _mm_storeu_si128((__m128i*)tmp, sum);
+ return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
+ }
+}
+
+static int SSE16x16SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 4, 1);
+}
+
+static int SSE16x8SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 2, 1);
+}
+
+static int SSE8x8SSE2(const uint8_t* a, const uint8_t* b) {
+ return SSE_Nx4SSE2(a, b, 2, 0);
+}
+
static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
- const __m128i zero = _mm_set1_epi16(0);
+ const __m128i zero = _mm_setzero_si128();
- // Load values.
+ // Load values. Note that we read 8 pixels instead of 4,
+ // but the a/b buffers are over-allocated to that effect.
const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]);
const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]);
const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]);
@@ -483,6 +588,7 @@ static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
const __m128i sum0 = _mm_add_epi32(madd0, madd1);
const __m128i sum1 = _mm_add_epi32(madd2, madd3);
const __m128i sum2 = _mm_add_epi32(sum0, sum1);
+
int32_t tmp[4];
_mm_storeu_si128((__m128i*)tmp, sum2);
return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
@@ -502,8 +608,6 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
int32_t sum[4];
__m128i tmp_0, tmp_1, tmp_2, tmp_3;
const __m128i zero = _mm_setzero_si128();
- const __m128i one = _mm_set1_epi16(1);
- const __m128i three = _mm_set1_epi16(3);
  // Load, combine and transpose inputs.
{
@@ -550,17 +654,14 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
// Horizontal pass and subsequent transpose.
{
// Calculate a and b (two 4x4 at once).
- const __m128i a0 = _mm_slli_epi16(_mm_add_epi16(tmp_0, tmp_2), 2);
- const __m128i a1 = _mm_slli_epi16(_mm_add_epi16(tmp_1, tmp_3), 2);
- const __m128i a2 = _mm_slli_epi16(_mm_sub_epi16(tmp_1, tmp_3), 2);
- const __m128i a3 = _mm_slli_epi16(_mm_sub_epi16(tmp_0, tmp_2), 2);
- // b0_extra = (a0 != 0);
- const __m128i b0_extra = _mm_andnot_si128(_mm_cmpeq_epi16 (a0, zero), one);
- const __m128i b0_base = _mm_add_epi16(a0, a1);
+ const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
+ const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
+ const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
+ const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
+ const __m128i b0 = _mm_add_epi16(a0, a1);
const __m128i b1 = _mm_add_epi16(a3, a2);
const __m128i b2 = _mm_sub_epi16(a3, a2);
const __m128i b3 = _mm_sub_epi16(a0, a1);
- const __m128i b0 = _mm_add_epi16(b0_base, b0_extra);
// a00 a01 a02 a03 b00 b01 b02 b03
// a10 a11 a12 a13 b10 b11 b12 b13
// a20 a21 a22 a23 b20 b21 b22 b23
@@ -635,19 +736,6 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
B_b2 = _mm_sub_epi16(B_b2, sign_B_b2);
}
- // b = abs(b) + 3
- A_b0 = _mm_add_epi16(A_b0, three);
- A_b2 = _mm_add_epi16(A_b2, three);
- B_b0 = _mm_add_epi16(B_b0, three);
- B_b2 = _mm_add_epi16(B_b2, three);
-
- // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
- // b = (abs(b) + 3) >> 3
- A_b0 = _mm_srai_epi16(A_b0, 3);
- A_b2 = _mm_srai_epi16(A_b2, 3);
- B_b0 = _mm_srai_epi16(B_b0, 3);
- B_b2 = _mm_srai_epi16(B_b2, 3);
-
// weighted sums
A_b0 = _mm_madd_epi16(A_b0, w_0);
A_b2 = _mm_madd_epi16(A_b2, w_8);
@@ -666,7 +754,7 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int diff_sum = TTransformSSE2(a, b, w);
- return (abs(diff_sum) + 8) >> 4;
+ return abs(diff_sum) >> 5;
}
static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
@@ -681,7 +769,6 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
return D;
}
-
//------------------------------------------------------------------------------
// Quantization
//
@@ -690,8 +777,7 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
int n, const VP8Matrix* const mtx) {
const __m128i max_coeff_2047 = _mm_set1_epi16(2047);
- const __m128i zero = _mm_set1_epi16(0);
- __m128i sign0, sign8;
+ const __m128i zero = _mm_setzero_si128();
__m128i coeff0, coeff8;
__m128i out0, out8;
__m128i packed_out;
@@ -713,8 +799,8 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]);
// sign(in) = in >> 15 (0x0000 if positive, 0xffff if negative)
- sign0 = _mm_srai_epi16(in0, 15);
- sign8 = _mm_srai_epi16(in8, 15);
+ const __m128i sign0 = _mm_srai_epi16(in0, 15);
+ const __m128i sign8 = _mm_srai_epi16(in8, 15);
// coeff = abs(in) = (in ^ sign) - sign
coeff0 = _mm_xor_si128(in0, sign0);
@@ -819,19 +905,28 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
}
}
+#endif // WEBP_USE_SSE2
+
+//------------------------------------------------------------------------------
+// Entry point
+
extern void VP8EncDspInitSSE2(void);
+
void VP8EncDspInitSSE2(void) {
+#if defined(WEBP_USE_SSE2)
VP8CollectHistogram = CollectHistogramSSE2;
VP8EncQuantizeBlock = QuantizeBlockSSE2;
VP8ITransform = ITransformSSE2;
VP8FTransform = FTransformSSE2;
+ VP8SSE16x16 = SSE16x16SSE2;
+ VP8SSE16x8 = SSE16x8SSE2;
+ VP8SSE8x8 = SSE8x8SSE2;
VP8SSE4x4 = SSE4x4SSE2;
VP8TDisto4x4 = Disto4x4SSE2;
VP8TDisto16x16 = Disto16x16SSE2;
+#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-
-#endif // WEBP_USE_SSE2
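
For reference, a scalar sketch of what the new SSE_Nx4SSE2() helpers compute
(the sketch's names are hypothetical; BPS is the encoder's prediction-buffer
stride, passed explicitly here):

    #include <stdint.h>

    // Sum of squared differences over a w x h block, both buffers
    // using the same row stride.
    static int SSE_NxM_C(const uint8_t* a, const uint8_t* b,
                         int w, int h, int stride) {
      int count = 0;
      int x, y;
      for (y = 0; y < h; ++y) {
        for (x = 0; x < w; ++x) {
          const int diff = (int)a[x] - (int)b[x];
          count += diff * diff;
        }
        a += stride;
        b += stride;
      }
      return count;
    }
    // SSE16x16SSE2(a, b) then matches SSE_NxM_C(a, b, 16, 16, BPS);
    // SSE16x8SSE2 matches (16, 8, BPS) and SSE8x8SSE2 matches (8, 8, BPS).
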
diff --git a/src/dsp/lossless.c b/src/dsp/lossless.c
index 6d3094fd..f951b897 100644
--- a/src/dsp/lossless.c
+++ b/src/dsp/lossless.c
@@ -11,8 +11,6 @@
// Jyrki Alakuijala (jyrki@google.com)
// Urvang Joshi (urvang@google.com)
-#define ANDROID_WEBP_RGB
-
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
@@ -23,7 +21,6 @@ extern "C" {
#include "../dec/vp8li.h"
#include "../dsp/yuv.h"
#include "../dsp/dsp.h"
-#include "../enc/histogram.h"
#define MAX_DIFF_COST (1e30f)
@@ -1036,12 +1033,14 @@ static void ConvertBGRAToRGBA4444(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
-#ifdef ANDROID_WEBP_RGB
- *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
- *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
+ const uint8_t rg = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
+ const uint8_t ba = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
+#ifdef WEBP_SWAP_16BIT_CSP
+ *dst++ = ba;
+ *dst++ = rg;
#else
- *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
- *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
+ *dst++ = rg;
+ *dst++ = ba;
#endif
}
}
@@ -1051,12 +1050,14 @@ static void ConvertBGRAToRGB565(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
-#ifdef ANDROID_WEBP_RGB
- *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
- *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
+ const uint8_t rg = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
+ const uint8_t gb = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
+#ifdef WEBP_SWAP_16BIT_CSP
+ *dst++ = gb;
+ *dst++ = rg;
#else
- *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
- *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
+ *dst++ = rg;
+ *dst++ = gb;
#endif
}
}
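
The two branches above differ only in output byte order; a scalar sketch of
the RGB565 packing with the optional WEBP_SWAP_16BIT_CSP swap ('swap' stands
in for the compile-time flag):

    #include <stdint.h>

    // Pack one 0xAARRGGBB pixel into two RGB565 bytes, mirroring
    // ConvertBGRAToRGB565() above.
    static void PackRGB565(uint32_t argb, uint8_t dst[2], int swap) {
      const uint8_t rg = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
      const uint8_t gb = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
      dst[0] = swap ? gb : rg;
      dst[1] = swap ? rg : gb;
    }
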
diff --git a/src/dsp/upsampling.c b/src/dsp/upsampling.c
index 4855eb14..91d939cd 100644
--- a/src/dsp/upsampling.c
+++ b/src/dsp/upsampling.c
@@ -328,6 +328,11 @@ void WebPInitUpsamplers(void) {
WebPInitUpsamplersSSE2();
}
#endif
+#if defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ WebPInitUpsamplersNEON();
+ }
+#endif
}
#endif // FANCY_UPSAMPLING
}
@@ -348,6 +353,11 @@ void WebPInitPremultiply(void) {
WebPInitPremultiplySSE2();
}
#endif
+#if defined(WEBP_USE_NEON)
+ if (VP8GetCPUInfo(kNEON)) {
+ WebPInitPremultiplyNEON();
+ }
+#endif
}
#endif // FANCY_UPSAMPLING
}
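
The hooks above follow the DSP layer's usual runtime-dispatch pattern; a
minimal standalone sketch (all names hypothetical):

    #include <stdint.h>

    typedef void (*UpsampleFunc)(const uint8_t* src, uint8_t* dst, int len);

    static void UpsampleC(const uint8_t* src, uint8_t* dst, int len) {
      (void)src; (void)dst; (void)len;  // portable C fallback
    }
    static void UpsampleNEON(const uint8_t* src, uint8_t* dst, int len) {
      (void)src; (void)dst; (void)len;  // NEON-accelerated version
    }

    static UpsampleFunc upsample = UpsampleC;  // safe default

    // Called once at startup; 'have_neon' plays the role of
    // VP8GetCPUInfo(kNEON) in the code above.
    static void InitUpsampler(int have_neon) {
      if (have_neon) upsample = UpsampleNEON;
    }
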
diff --git a/src/dsp/upsampling_neon.c b/src/dsp/upsampling_neon.c
new file mode 100644
index 00000000..00e2f892
--- /dev/null
+++ b/src/dsp/upsampling_neon.c
@@ -0,0 +1,292 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// NEON version of YUV to RGB upsampling functions.
+//
+// Author: mans@mansr.com (Mans Rullgard)
+// Based on SSE code by: somnath@google.com (Somnath Banerjee)
+
+#include "./dsp.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#if defined(WEBP_USE_NEON)
+
+#include <assert.h>
+#include <arm_neon.h>
+#include <string.h>
+#include "./yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
+#define UPSAMPLE_16PIXELS(r1, r2, out) { \
+ uint8x8_t a = vld1_u8(r1); \
+ uint8x8_t b = vld1_u8(r1 + 1); \
+ uint8x8_t c = vld1_u8(r2); \
+ uint8x8_t d = vld1_u8(r2 + 1); \
+ \
+ uint16x8_t al = vshll_n_u8(a, 1); \
+ uint16x8_t bl = vshll_n_u8(b, 1); \
+ uint16x8_t cl = vshll_n_u8(c, 1); \
+ uint16x8_t dl = vshll_n_u8(d, 1); \
+ \
+ uint8x8_t diag1, diag2; \
+ uint16x8_t sl; \
+ \
+ /* a + b + c + d */ \
+ sl = vaddl_u8(a, b); \
+ sl = vaddw_u8(sl, c); \
+ sl = vaddw_u8(sl, d); \
+ \
+ al = vaddq_u16(sl, al); /* 3a + b + c + d */ \
+ bl = vaddq_u16(sl, bl); /* a + 3b + c + d */ \
+ \
+ al = vaddq_u16(al, dl); /* 3a + b + c + 3d */ \
+ bl = vaddq_u16(bl, cl); /* a + 3b + 3c + d */ \
+ \
+ diag2 = vshrn_n_u16(al, 3); \
+ diag1 = vshrn_n_u16(bl, 3); \
+ \
+ a = vrhadd_u8(a, diag1); \
+ b = vrhadd_u8(b, diag2); \
+ c = vrhadd_u8(c, diag2); \
+ d = vrhadd_u8(d, diag1); \
+ \
+ { \
+ const uint8x8x2_t a_b = {{ a, b }}; \
+ const uint8x8x2_t c_d = {{ c, d }}; \
+ vst2_u8(out, a_b); \
+ vst2_u8(out + 32, c_d); \
+ } \
+}
+
+// Turn the macro into a function to reduce code size where speed is not
+// critical.
+static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
+ uint8_t *out) {
+ UPSAMPLE_16PIXELS(r1, r2, out);
+}
+
+#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
+ uint8_t r1[9], r2[9]; \
+ memcpy(r1, (tb), (num_pixels)); \
+ memcpy(r2, (bb), (num_pixels)); \
+ /* replicate last byte */ \
+ memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
+ memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
+ Upsample16Pixels(r1, r2, out); \
+}
+
+#define CY 76283
+#define CVR 89858
+#define CUG 22014
+#define CVG 45773
+#define CUB 113618
+
+static const int16_t coef[4] = { CVR / 4, CUG, CVG / 2, CUB / 4 };
+
+#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
+ int i; \
+ for (i = 0; i < N; i += 8) { \
+ int off = ((cur_x) + i) * XSTEP; \
+ uint8x8_t y = vld1_u8(src_y + (cur_x) + i); \
+ uint8x8_t u = vld1_u8((src_uv) + i); \
+ uint8x8_t v = vld1_u8((src_uv) + i + 16); \
+ int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
+ int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
+ int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
+ \
+ int16x8_t ud = vshlq_n_s16(uu, 1); \
+ int16x8_t vd = vshlq_n_s16(vv, 1); \
+ \
+ int32x4_t vrl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(vv), 1), \
+ vget_low_s16(vd), cf16, 0); \
+ int32x4_t vrh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(vv), 1), \
+ vget_high_s16(vd), cf16, 0); \
+ int16x8_t vr = vcombine_s16(vrshrn_n_s32(vrl, 16), \
+ vrshrn_n_s32(vrh, 16)); \
+ \
+ int32x4_t vl = vmovl_s16(vget_low_s16(vv)); \
+ int32x4_t vh = vmovl_s16(vget_high_s16(vv)); \
+ int32x4_t ugl = vmlal_lane_s16(vl, vget_low_s16(uu), cf16, 1); \
+ int32x4_t ugh = vmlal_lane_s16(vh, vget_high_s16(uu), cf16, 1); \
+ int32x4_t gcl = vqdmlal_lane_s16(ugl, vget_low_s16(vv), cf16, 2); \
+ int32x4_t gch = vqdmlal_lane_s16(ugh, vget_high_s16(vv), cf16, 2); \
+ int16x8_t gc = vcombine_s16(vrshrn_n_s32(gcl, 16), \
+ vrshrn_n_s32(gch, 16)); \
+ \
+ int32x4_t ubl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(uu), 1), \
+ vget_low_s16(ud), cf16, 3); \
+ int32x4_t ubh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(uu), 1), \
+ vget_high_s16(ud), cf16, 3); \
+ int16x8_t ub = vcombine_s16(vrshrn_n_s32(ubl, 16), \
+ vrshrn_n_s32(ubh, 16)); \
+ \
+ int32x4_t rl = vaddl_s16(vget_low_s16(yy), vget_low_s16(vr)); \
+ int32x4_t rh = vaddl_s16(vget_high_s16(yy), vget_high_s16(vr)); \
+ int32x4_t gl = vsubl_s16(vget_low_s16(yy), vget_low_s16(gc)); \
+ int32x4_t gh = vsubl_s16(vget_high_s16(yy), vget_high_s16(gc)); \
+ int32x4_t bl = vaddl_s16(vget_low_s16(yy), vget_low_s16(ub)); \
+ int32x4_t bh = vaddl_s16(vget_high_s16(yy), vget_high_s16(ub)); \
+ \
+ rl = vmulq_lane_s32(rl, cf32, 0); \
+ rh = vmulq_lane_s32(rh, cf32, 0); \
+ gl = vmulq_lane_s32(gl, cf32, 0); \
+ gh = vmulq_lane_s32(gh, cf32, 0); \
+ bl = vmulq_lane_s32(bl, cf32, 0); \
+ bh = vmulq_lane_s32(bh, cf32, 0); \
+ \
+ y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, 16), \
+ vrshrn_n_s32(rh, 16))); \
+ u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, 16), \
+ vrshrn_n_s32(gh, 16))); \
+ v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(bl, 16), \
+ vrshrn_n_s32(bh, 16))); \
+ STR_ ## FMT(out + off, y, u, v); \
+ } \
+}
+
+#define v255 vmov_n_u8(255)
+
+#define STR_Rgb(out, r, g, b) do { \
+ const uint8x8x3_t r_g_b = {{ r, g, b }}; \
+ vst3_u8(out, r_g_b); \
+} while (0)
+
+#define STR_Bgr(out, r, g, b) do { \
+ const uint8x8x3_t b_g_r = {{ b, g, r }}; \
+ vst3_u8(out, b_g_r); \
+} while (0)
+
+#define STR_Rgba(out, r, g, b) do { \
+ const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }}; \
+ vst4_u8(out, r_g_b_v255); \
+} while (0)
+
+#define STR_Bgra(out, r, g, b) do { \
+ const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }}; \
+ vst4_u8(out, b_g_r_v255); \
+} while (0)
+
+#define CONVERT1(FMT, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
+ int i; \
+ for (i = 0; i < N; i++) { \
+ int off = ((cur_x) + i) * XSTEP; \
+ int y = src_y[(cur_x) + i]; \
+ int u = (src_uv)[i]; \
+ int v = (src_uv)[i + 16]; \
+ VP8YuvTo ## FMT(y, u, v, rgb + off); \
+ } \
+}
+
+#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
+ top_dst, bottom_dst, cur_x, len) { \
+ if (top_y) { \
+ CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
+ } \
+ if (bottom_y) { \
+ CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x) \
+ } \
+}
+
+#define CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, uv, \
+ top_dst, bottom_dst, cur_x, len) { \
+ if (top_y) { \
+ CONVERT1(FMT, XSTEP, len, top_y, uv, top_dst, cur_x); \
+ } \
+ if (bottom_y) { \
+ CONVERT1(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
+ } \
+}
+
+#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
+static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
+ const uint8_t *top_u, const uint8_t *top_v, \
+ const uint8_t *cur_u, const uint8_t *cur_v, \
+ uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
+ int block; \
+ /* 16 byte aligned array to cache reconstructed u and v */ \
+ uint8_t uv_buf[2 * 32 + 15]; \
+ uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
+ const int uv_len = (len + 1) >> 1; \
+  /* 9 pixels must be readable for each block */ \
+ const int num_blocks = (uv_len - 1) >> 3; \
+ const int leftover = uv_len - num_blocks * 8; \
+ const int last_pos = 1 + 16 * num_blocks; \
+ \
+ const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
+ const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
+ \
+ const int16x4_t cf16 = vld1_s16(coef); \
+ const int32x2_t cf32 = vmov_n_s32(CY); \
+ const uint8x8_t u16 = vmov_n_u8(16); \
+ const uint8x8_t u128 = vmov_n_u8(128); \
+ \
+  /* Treat the first pixel in a regular way */ \
+ if (top_y) { \
+ const int u0 = (top_u[0] + u_diag) >> 1; \
+ const int v0 = (top_v[0] + v_diag) >> 1; \
+ VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
+ } \
+ if (bottom_y) { \
+ const int u0 = (cur_u[0] + u_diag) >> 1; \
+ const int v0 = (cur_v[0] + v_diag) >> 1; \
+ VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
+ } \
+ \
+ for (block = 0; block < num_blocks; ++block) { \
+ UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
+ UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
+ CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
+ top_dst, bottom_dst, 16 * block + 1, 16); \
+ top_u += 8; \
+ cur_u += 8; \
+ top_v += 8; \
+ cur_v += 8; \
+ } \
+ \
+ UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
+ UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
+ CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, r_uv, \
+ top_dst, bottom_dst, last_pos, len - last_pos); \
+}
+
+// NEON variants of the fancy upsampler.
+NEON_UPSAMPLE_FUNC(UpsampleRgbLinePairNEON, Rgb, 3)
+NEON_UPSAMPLE_FUNC(UpsampleBgrLinePairNEON, Bgr, 3)
+NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePairNEON, Rgba, 4)
+NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4)
+
+#endif // FANCY_UPSAMPLING
+
+#endif // WEBP_USE_NEON
+
+//------------------------------------------------------------------------------
+
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+void WebPInitUpsamplersNEON(void) {
+#if defined(WEBP_USE_NEON)
+ WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairNEON;
+ WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairNEON;
+ WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairNEON;
+ WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairNEON;
+#endif // WEBP_USE_NEON
+}
+
+void WebPInitPremultiplyNEON(void) {
+#if defined(WEBP_USE_NEON)
+ WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairNEON;
+ WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairNEON;
+#endif // WEBP_USE_NEON
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
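
For reference, the scalar filter that UPSAMPLE_16PIXELS approximates: each
output chroma sample blends its four nearest source samples with a 9-3-3-1
kernel (the NEON path reproduces this up to rounding, via vrhadd averages):

    #include <stdint.h>

    // One output sample of the 2x "fancy" upsampler: a is the nearest
    // source sample, b/c its horizontal and vertical neighbours, d the
    // diagonal one.
    static uint8_t UpsampleOnePixel(uint8_t a, uint8_t b,
                                    uint8_t c, uint8_t d) {
      return (uint8_t)((9 * a + 3 * b + 3 * c + d + 8) >> 4);
    }
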
diff --git a/src/dsp/upsampling_sse2.c b/src/dsp/upsampling_sse2.c
index 8cb275a0..ba075d11 100644
--- a/src/dsp/upsampling_sse2.c
+++ b/src/dsp/upsampling_sse2.c
@@ -11,6 +11,10 @@
#include "./dsp.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#if defined(WEBP_USE_SSE2)
#include <assert.h>
@@ -18,10 +22,6 @@
#include <string.h>
#include "./yuv.h"
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
#ifdef FANCY_UPSAMPLING
// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
@@ -51,12 +51,12 @@ extern "C" {
// pack and store two alternating pixel rows
#define PACK_AND_STORE(a, b, da, db, out) do { \
- const __m128i ta = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
- const __m128i tb = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
- const __m128i t1 = _mm_unpacklo_epi8(ta, tb); \
- const __m128i t2 = _mm_unpackhi_epi8(ta, tb); \
- _mm_store_si128(((__m128i*)(out)) + 0, t1); \
- _mm_store_si128(((__m128i*)(out)) + 1, t2); \
+ const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
+ const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
+ const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
+ const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
+ _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
+ _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
} while (0)
// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
@@ -128,7 +128,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const uint8_t* top_u, const uint8_t* top_v, \
const uint8_t* cur_u, const uint8_t* cur_v, \
uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
- int b; \
+ int block; \
/* 16 byte aligned array to cache reconstructed u and v */ \
uint8_t uv_buf[4 * 32 + 15]; \
uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
@@ -154,11 +154,11 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
FUNC(bottom_y[0], u0, v0, bottom_dst); \
} \
\
- for (b = 0; b < num_blocks; ++b) { \
+ for (block = 0; block < num_blocks; ++block) { \
UPSAMPLE_32PIXELS(top_u, cur_u, r_uv + 0 * 32); \
UPSAMPLE_32PIXELS(top_v, cur_v, r_uv + 1 * 32); \
CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \
- 32 * b + 1, 32) \
+ 32 * block + 1, 32) \
top_u += 16; \
cur_u += 16; \
top_v += 16; \
@@ -184,26 +184,32 @@ SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4)
#undef CONVERT2RGB
#undef SSE2_UPSAMPLE_FUNC
+#endif // FANCY_UPSAMPLING
+
+#endif // WEBP_USE_SSE2
+
//------------------------------------------------------------------------------
extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
void WebPInitUpsamplersSSE2(void) {
+#if defined(WEBP_USE_SSE2)
WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairSSE2;
WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairSSE2;
WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairSSE2;
+#endif // WEBP_USE_SSE2
}
void WebPInitPremultiplySSE2(void) {
+#if defined(WEBP_USE_SSE2)
WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairSSE2;
+#endif // WEBP_USE_SSE2
}
-#endif // FANCY_UPSAMPLING
-
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
-#endif // WEBP_USE_SSE2
+
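
Both the SSE2 and NEON upsamplers cache reconstructed chroma in a manually
aligned stack buffer; the alignment idiom, in isolation:

    #include <stdint.h>

    // Over-allocate by 15 bytes, then round the pointer up to the next
    // 16-byte boundary so aligned SIMD stores into the buffer are legal.
    static uint8_t* Align16(uint8_t* ptr) {
      return (uint8_t*)(((uintptr_t)ptr + 15) & ~(uintptr_t)15);
    }
    // Usage, as in the upsamplers:
    //   uint8_t uv_buf[4 * 32 + 15];
    //   uint8_t* const r_uv = Align16(uv_buf);
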
diff --git a/src/dsp/yuv.c b/src/dsp/yuv.c
index 7f05f9a3..38895281 100644
--- a/src/dsp/yuv.c
+++ b/src/dsp/yuv.c
@@ -33,6 +33,7 @@ void VP8YUVInit(void) {
if (done) {
return;
}
+#ifndef USE_YUVj
for (i = 0; i < 256; ++i) {
VP8kVToR[i] = (89858 * (i - 128) + YUV_HALF) >> YUV_FIX;
VP8kUToG[i] = -22014 * (i - 128) + YUV_HALF;
@@ -44,6 +45,20 @@ void VP8YUVInit(void) {
VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
}
+#else
+ for (i = 0; i < 256; ++i) {
+ VP8kVToR[i] = (91881 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ VP8kUToG[i] = -22554 * (i - 128) + YUV_HALF;
+ VP8kVToG[i] = -46802 * (i - 128);
+ VP8kUToB[i] = (116130 * (i - 128) + YUV_HALF) >> YUV_FIX;
+ }
+ for (i = YUV_RANGE_MIN; i < YUV_RANGE_MAX; ++i) {
+ const int k = i;
+ VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
+ VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
+ }
+#endif
+
done = 1;
}
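
The alternate table under USE_YUVj matches the full-range JPEG conversion
(R = Y + 1.402*(V-128), etc.); assuming YUV_FIX == 16 as elsewhere in the
library, the new constants round as expected:

    #include <assert.h>

    // Sanity check of the 16-bit fixed-point JPEG-range weights.
    static void CheckYuvjWeights(void) {
      assert((int)(1.402   * 65536 + 0.5) ==  91881);  // V -> R
      assert((int)(0.34414 * 65536 + 0.5) ==  22554);  // U -> G
      assert((int)(0.71414 * 65536 + 0.5) ==  46802);  // V -> G
      assert((int)(1.772   * 65536 + 0.5) == 116130);  // U -> B
    }
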
diff --git a/src/dsp/yuv.h b/src/dsp/yuv.h
index ee3587e3..add167ea 100644
--- a/src/dsp/yuv.h
+++ b/src/dsp/yuv.h
@@ -7,6 +7,25 @@
//
// inline YUV<->RGB conversion function
//
+// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
+// More information at: http://en.wikipedia.org/wiki/YCbCr
+// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
+// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
+// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
+// We use 16bit fixed point operations for RGB->YUV conversion.
+//
+// For the Y'CbCr to RGB conversion, the BT.601 specification reads:
+// R = 1.164 * (Y-16) + 1.596 * (V-128)
+// G = 1.164 * (Y-16) - 0.813 * (V-128) - 0.391 * (U-128)
+// B = 1.164 * (Y-16) + 2.018 * (U-128)
+// where Y is in the [16,235] range, and U/V in the [16,240] range.
+// But the common term 1.164 * (Y-16) can be handled as an offset in the
+// VP8kClip[] table. So the formulae should be read as:
+// R = 1.164 * [Y + 1.371 * (V-128) ] - 18.624
+// G = 1.164 * [Y - 0.698 * (V-128) - 0.336 * (U-128)] - 18.624
+// B = 1.164 * [Y + 1.733 * (U-128)] - 18.624
+// once factorized. Here too, 16bit fixed precision is used.
+//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DSP_YUV_H_
@@ -14,13 +33,15 @@
#include "../dec/decode_vp8.h"
-/*
- * Define ANDROID_WEBP_RGB to enable specific optimizations for Android
- * RGBA_4444 & RGB_565 color support.
- *
- */
-
-#define ANDROID_WEBP_RGB
+#if defined(WEBP_EXPERIMENTAL_FEATURES)
+// Do NOT activate this feature for real compression. This is only experimental!
+// This flag is for comparison purposes against JPEG's "YUVj" natural colorspace.
+// This colorspace is close to Rec.601's Y'CbCr model with the notable
+// difference of allowing larger range for luma/chroma.
+// See http://en.wikipedia.org/wiki/YCbCr#JPEG_conversion paragraph, and its
+// difference with http://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
+// #define USE_YUVj
+#endif
//------------------------------------------------------------------------------
// YUV -> RGB conversion
@@ -53,16 +74,16 @@ static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
-#ifdef ANDROID_WEBP_RGB
- rgb[1] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
- (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
- rgb[0] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
- (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
+ const uint8_t rg = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
+ (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
+ const uint8_t gb = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
+ (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
+#ifdef WEBP_SWAP_16BIT_CSP
+ rgb[0] = gb;
+ rgb[1] = rg;
#else
- rgb[0] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
- (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
- rgb[1] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
- (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
+ rgb[0] = rg;
+ rgb[1] = gb;
#endif
}
@@ -77,14 +98,15 @@ static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
-#ifdef ANDROID_WEBP_RGB
- argb[1] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
- VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
- argb[0] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4);
+ const uint8_t rg = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
+ VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
+ const uint8_t ba = (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4) | 0x0f;
+#ifdef WEBP_SWAP_16BIT_CSP
+ argb[0] = ba;
+ argb[1] = rg;
#else
- argb[0] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
- VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
- argb[1] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4);
+ argb[0] = rg;
+ argb[1] = ba;
#endif
}
@@ -115,18 +137,14 @@ void VP8YUVInit(void);
//------------------------------------------------------------------------------
// RGB -> YUV conversion
-// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
-// More information at: http://en.wikipedia.org/wiki/YCbCr
-// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
-// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
-// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
-// We use 16bit fixed point operations.
static WEBP_INLINE int VP8ClipUV(int v) {
- v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
- return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
+ v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
+ return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
}
+#ifndef USE_YUVj
+
static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
const int luma = 16839 * r + 33059 * g + 6420 * b;
@@ -134,13 +152,38 @@ static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
}
static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
- return VP8ClipUV(-9719 * r - 19081 * g + 28800 * b);
+ const int u = -9719 * r - 19081 * g + 28800 * b;
+ return VP8ClipUV(u);
+}
+
+static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
+ const int v = +28800 * r - 24116 * g - 4684 * b;
+ return VP8ClipUV(v);
+}
+
+#else
+
+// This is the JPEG-YUV colorspace, for comparison only!
+// These are also 16-bit precision coefficients from Rec.601, but with full
+// [0..255] output range.
+static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
+ const int kRound = (1 << (YUV_FIX - 1));
+ const int luma = 19595 * r + 38470 * g + 7471 * b;
+ return (luma + kRound) >> YUV_FIX; // no need to clip
+}
+
+static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
+ const int u = -11058 * r - 21710 * g + 32768 * b;
+ return VP8ClipUV(u);
}
static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
- return VP8ClipUV(+28800 * r - 24116 * g - 4684 * b);
+ const int v = 32768 * r - 27439 * g - 5329 * b;
+ return VP8ClipUV(v);
}
+#endif // USE_YUVj
+
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
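
A note on the rounding constant in VP8ClipUV(): assuming YUV_FIX == 16, the
single addition folds the chroma offset and the rounding bias together:

    // 257 << (YUV_FIX + 2 - 1) = 257 << 17 = 33685504
    //                          = (128 << 18) + (1 << 17)
    // i.e. the +128 chroma offset plus a rounding half, both pre-scaled
    // for the final >> (YUV_FIX + 2) shift.
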
diff --git a/src/enc/Android.mk b/src/enc/Android.mk
index 7f38d40d..c9000acd 100644
--- a/src/enc/Android.mk
+++ b/src/enc/Android.mk
@@ -33,16 +33,12 @@ LOCAL_SRC_FILES := \
vp8l.c \
webpenc.c \
../dsp/cpu.c \
- ../dsp/dec.c \
- ../dsp/dec_neon.c \
- ../dsp/dec_sse2.c \
+ ../dsp/cpu-features.c \
../dsp/enc.c \
+ ../dsp/enc_neon.c \
../dsp/enc_sse2.c \
../dsp/lossless.c \
- ../dsp/upsampling.c \
- ../dsp/upsampling_sse2.c \
../dsp/yuv.c \
- ../utils/bit_reader.c \
../utils/bit_writer.c \
../utils/color_cache.c \
../utils/filters.c \
@@ -53,7 +49,7 @@ LOCAL_SRC_FILES := \
../utils/thread.c \
../utils/utils.c
-LOCAL_CFLAGS := -DANDROID
+LOCAL_CFLAGS := -DANDROID -DWEBP_SWAP_16BIT_CSP
LOCAL_C_INCLUDES += \
$(LOCAL_PATH) \
diff --git a/src/enc/alpha.c b/src/enc/alpha.c
index 376f786c..c34ad17c 100644
--- a/src/enc/alpha.c
+++ b/src/enc/alpha.c
@@ -79,18 +79,17 @@ static int EncodeLossless(const uint8_t* const data, int width, int height,
WebPConfigInit(&config);
config.lossless = 1;
config.method = effort_level; // impact is very small
- // Set moderate default quality setting for alpha. Higher qualities (80 and
- // above) could be very slow.
- config.quality = 10.f + 15.f * effort_level;
- if (config.quality > 100.f) config.quality = 100.f;
+ // Set a moderate default quality setting for alpha.
+ config.quality = 6.f * effort_level;
+ assert(config.quality >= 0 && config.quality <= 100.f);
ok = VP8LBitWriterInit(&tmp_bw, (width * height) >> 3);
ok = ok && (VP8LEncodeStream(&config, &picture, &tmp_bw) == VP8_ENC_OK);
WebPPictureFree(&picture);
if (ok) {
- const uint8_t* const data = VP8LBitWriterFinish(&tmp_bw);
- const size_t data_size = VP8LBitWriterNumBytes(&tmp_bw);
- VP8BitWriterAppend(bw, data, data_size);
+ const uint8_t* const buffer = VP8LBitWriterFinish(&tmp_bw);
+ const size_t buffer_size = VP8LBitWriterNumBytes(&tmp_bw);
+ VP8BitWriterAppend(bw, buffer, buffer_size);
}
VP8LBitWriterDestroy(&tmp_bw);
return ok && !bw->error_;
diff --git a/src/enc/analysis.c b/src/enc/analysis.c
index 22cfb492..06142207 100644
--- a/src/enc/analysis.c
+++ b/src/enc/analysis.c
@@ -23,10 +23,6 @@ extern "C" {
#define MAX_ITERS_K_MEANS 6
-static int ClipAlpha(int alpha) {
- return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
-}
-
//------------------------------------------------------------------------------
// Smooth the segment map by replacing isolated blocks with the majority value
// of their neighbours.
@@ -72,50 +68,10 @@ static void SmoothSegmentMap(VP8Encoder* const enc) {
}
//------------------------------------------------------------------------------
-// Finalize Segment probability based on the coding tree
-
-static int GetProba(int a, int b) {
- int proba;
- const int total = a + b;
- if (total == 0) return 255; // that's the default probability.
- proba = (255 * a + total / 2) / total;
- return proba;
-}
-
-static void SetSegmentProbas(VP8Encoder* const enc) {
- int p[NUM_MB_SEGMENTS] = { 0 };
- int n;
-
- for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
- const VP8MBInfo* const mb = &enc->mb_info_[n];
- p[mb->segment_]++;
- }
- if (enc->pic_->stats) {
- for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
- enc->pic_->stats->segment_size[n] = p[n];
- }
- }
- if (enc->segment_hdr_.num_segments_ > 1) {
- uint8_t* const probas = enc->proba_.segments_;
- probas[0] = GetProba(p[0] + p[1], p[2] + p[3]);
- probas[1] = GetProba(p[0], p[1]);
- probas[2] = GetProba(p[2], p[3]);
-
- enc->segment_hdr_.update_map_ =
- (probas[0] != 255) || (probas[1] != 255) || (probas[2] != 255);
- enc->segment_hdr_.size_ =
- p[0] * (VP8BitCost(0, probas[0]) + VP8BitCost(0, probas[1])) +
- p[1] * (VP8BitCost(0, probas[0]) + VP8BitCost(1, probas[1])) +
- p[2] * (VP8BitCost(1, probas[0]) + VP8BitCost(0, probas[2])) +
- p[3] * (VP8BitCost(1, probas[0]) + VP8BitCost(1, probas[2]));
- } else {
- enc->segment_hdr_.update_map_ = 0;
- enc->segment_hdr_.size_ = 0;
- }
-}
+// set segment susceptibility alpha_ / beta_
static WEBP_INLINE int clip(int v, int m, int M) {
- return v < m ? m : v > M ? M : v;
+ return (v < m) ? m : (v > M) ? M : v;
}
static void SetSegmentAlphas(VP8Encoder* const enc,
@@ -142,22 +98,63 @@ static void SetSegmentAlphas(VP8Encoder* const enc,
}
//------------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+
+#define MAX_ALPHA 255 // 8b of precision for susceptibilities.
+#define ALPHA_SCALE (2 * MAX_ALPHA) // scaling factor for alpha.
+#define DEFAULT_ALPHA (-1)
+#define IS_BETTER_ALPHA(alpha, best_alpha) ((alpha) > (best_alpha))
+
+static int FinalAlphaValue(int alpha) {
+ alpha = MAX_ALPHA - alpha;
+ return clip(alpha, 0, MAX_ALPHA);
+}
+
+static int GetAlpha(const VP8Histogram* const histo) {
+ int max_value = 0, last_non_zero = 1;
+ int k;
+ int alpha;
+ for (k = 0; k <= MAX_COEFF_THRESH; ++k) {
+ const int value = histo->distribution[k];
+ if (value > 0) {
+ if (value > max_value) max_value = value;
+ last_non_zero = k;
+ }
+ }
+ // 'alpha' will later be clipped to [0..MAX_ALPHA] range, clamping outer
+ // values which happen to be mostly noise. This leaves the maximum precision
+ // for handling the useful small values which contribute most.
+ alpha = (max_value > 1) ? ALPHA_SCALE * last_non_zero / max_value : 0;
+ return alpha;
+}
+
+static void MergeHistograms(const VP8Histogram* const in,
+ VP8Histogram* const out) {
+ int i;
+ for (i = 0; i <= MAX_COEFF_THRESH; ++i) {
+ out->distribution[i] += in->distribution[i];
+ }
+}
+
+//------------------------------------------------------------------------------
// Simplified k-Means, to assign Nb segments based on alpha-histogram
-static void AssignSegments(VP8Encoder* const enc, const int alphas[256]) {
+static void AssignSegments(VP8Encoder* const enc,
+ const int alphas[MAX_ALPHA + 1]) {
const int nb = enc->segment_hdr_.num_segments_;
int centers[NUM_MB_SEGMENTS];
int weighted_average = 0;
- int map[256];
+ int map[MAX_ALPHA + 1];
int a, n, k;
- int min_a = 0, max_a = 255, range_a;
+ int min_a = 0, max_a = MAX_ALPHA, range_a;
// 'int' type is ok for histo, and won't overflow
int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];
// bracket the input
- for (n = 0; n < 256 && alphas[n] == 0; ++n) {}
+ for (n = 0; n <= MAX_ALPHA && alphas[n] == 0; ++n) {}
min_a = n;
- for (n = 255; n > min_a && alphas[n] == 0; --n) {}
+ for (n = MAX_ALPHA; n > min_a && alphas[n] == 0; --n) {}
max_a = n;
range_a = max_a - min_a;
@@ -210,7 +207,7 @@ static void AssignSegments(VP8Encoder* const enc, const int alphas[256]) {
VP8MBInfo* const mb = &enc->mb_info_[n];
const int alpha = mb->alpha_;
mb->segment_ = map[alpha];
- mb->alpha_ = centers[map[alpha]]; // just for the record.
+ mb->alpha_ = centers[map[alpha]]; // for the record.
}
if (nb > 1) {
@@ -218,7 +215,6 @@ static void AssignSegments(VP8Encoder* const enc, const int alphas[256]) {
if (smooth) SmoothSegmentMap(enc);
}
- SetSegmentProbas(enc); // Assign final proba
SetSegmentAlphas(enc, centers, weighted_average); // pick some alphas.
}
@@ -236,15 +232,19 @@ static void AssignSegments(VP8Encoder* const enc, const int alphas[256]) {
static int MBAnalyzeBestIntra16Mode(VP8EncIterator* const it) {
const int max_mode = (it->enc_->method_ >= 3) ? MAX_INTRA16_MODE : 4;
int mode;
- int best_alpha = -1;
+ int best_alpha = DEFAULT_ALPHA;
int best_mode = 0;
VP8MakeLuma16Preds(it);
for (mode = 0; mode < max_mode; ++mode) {
- const int alpha = VP8CollectHistogram(it->yuv_in_ + Y_OFF,
- it->yuv_p_ + VP8I16ModeOffsets[mode],
- 0, 16);
- if (alpha > best_alpha) {
+ VP8Histogram histo = { { 0 } };
+ int alpha;
+
+ VP8CollectHistogram(it->yuv_in_ + Y_OFF,
+ it->yuv_p_ + VP8I16ModeOffsets[mode],
+ 0, 16, &histo);
+ alpha = GetAlpha(&histo);
+ if (IS_BETTER_ALPHA(alpha, best_alpha)) {
best_alpha = alpha;
best_mode = mode;
}
@@ -257,45 +257,58 @@ static int MBAnalyzeBestIntra4Mode(VP8EncIterator* const it,
int best_alpha) {
uint8_t modes[16];
const int max_mode = (it->enc_->method_ >= 3) ? MAX_INTRA4_MODE : NUM_BMODES;
- int i4_alpha = 0;
+ int i4_alpha;
+ VP8Histogram total_histo = { { 0 } };
+ int cur_histo = 0;
+
VP8IteratorStartI4(it);
do {
int mode;
- int best_mode_alpha = -1;
+ int best_mode_alpha = DEFAULT_ALPHA;
+ VP8Histogram histos[2];
const uint8_t* const src = it->yuv_in_ + Y_OFF + VP8Scan[it->i4_];
VP8MakeIntra4Preds(it);
for (mode = 0; mode < max_mode; ++mode) {
- const int alpha = VP8CollectHistogram(src,
- it->yuv_p_ + VP8I4ModeOffsets[mode],
- 0, 1);
- if (alpha > best_mode_alpha) {
+ int alpha;
+
+ memset(&histos[cur_histo], 0, sizeof(histos[cur_histo]));
+ VP8CollectHistogram(src, it->yuv_p_ + VP8I4ModeOffsets[mode],
+ 0, 1, &histos[cur_histo]);
+ alpha = GetAlpha(&histos[cur_histo]);
+ if (IS_BETTER_ALPHA(alpha, best_mode_alpha)) {
best_mode_alpha = alpha;
modes[it->i4_] = mode;
+ cur_histo ^= 1; // keep track of best histo so far.
}
}
- i4_alpha += best_mode_alpha;
+ // accumulate best histogram
+ MergeHistograms(&histos[cur_histo ^ 1], &total_histo);
// Note: we reuse the original samples for predictors
} while (VP8IteratorRotateI4(it, it->yuv_in_ + Y_OFF));
- if (i4_alpha > best_alpha) {
+ i4_alpha = GetAlpha(&total_histo);
+ if (IS_BETTER_ALPHA(i4_alpha, best_alpha)) {
VP8SetIntra4Mode(it, modes);
- best_alpha = ClipAlpha(i4_alpha);
+ best_alpha = i4_alpha;
}
return best_alpha;
}
static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
- int best_alpha = -1;
+ int best_alpha = DEFAULT_ALPHA;
int best_mode = 0;
const int max_mode = (it->enc_->method_ >= 3) ? MAX_UV_MODE : 4;
int mode;
VP8MakeChroma8Preds(it);
for (mode = 0; mode < max_mode; ++mode) {
- const int alpha = VP8CollectHistogram(it->yuv_in_ + U_OFF,
- it->yuv_p_ + VP8UVModeOffsets[mode],
- 16, 16 + 4 + 4);
- if (alpha > best_alpha) {
+ VP8Histogram histo = { { 0 } };
+ int alpha;
+ VP8CollectHistogram(it->yuv_in_ + U_OFF,
+ it->yuv_p_ + VP8UVModeOffsets[mode],
+ 16, 16 + 4 + 4, &histo);
+ alpha = GetAlpha(&histo);
+ if (IS_BETTER_ALPHA(alpha, best_alpha)) {
best_alpha = alpha;
best_mode = mode;
}
@@ -305,7 +318,7 @@ static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
}
static void MBAnalyze(VP8EncIterator* const it,
- int alphas[256], int* const uv_alpha) {
+ int alphas[MAX_ALPHA + 1], int* const uv_alpha) {
const VP8Encoder* const enc = it->enc_;
int best_alpha, best_uv_alpha;
@@ -324,10 +337,19 @@ static void MBAnalyze(VP8EncIterator* const it,
best_uv_alpha = MBAnalyzeBestUVMode(it);
// Final susceptibility mix
- best_alpha = (best_alpha + best_uv_alpha + 1) / 2;
+ best_alpha = (3 * best_alpha + best_uv_alpha + 2) >> 2;
+ best_alpha = FinalAlphaValue(best_alpha);
alphas[best_alpha]++;
*uv_alpha += best_uv_alpha;
- it->mb_->alpha_ = best_alpha; // Informative only.
+ it->mb_->alpha_ = best_alpha; // for later remapping.
+}
+
+static void DefaultMBInfo(VP8MBInfo* const mb) {
+ mb->type_ = 1; // I16x16
+ mb->uv_mode_ = 0;
+ mb->skip_ = 0; // not skipped
+ mb->segment_ = 0; // default segment
+ mb->alpha_ = 0;
}
//------------------------------------------------------------------------------
@@ -342,20 +364,34 @@ static void MBAnalyze(VP8EncIterator* const it,
int VP8EncAnalyze(VP8Encoder* const enc) {
int ok = 1;
- int alphas[256] = { 0 };
- VP8EncIterator it;
-
- VP8IteratorInit(enc, &it);
- enc->uv_alpha_ = 0;
- do {
- VP8IteratorImport(&it);
- MBAnalyze(&it, alphas, &enc->uv_alpha_);
- ok = VP8IteratorProgress(&it, 20);
- // Let's pretend we have perfect lossless reconstruction.
- } while (ok && VP8IteratorNext(&it, it.yuv_in_));
- enc->uv_alpha_ /= enc->mb_w_ * enc->mb_h_;
- if (ok) AssignSegments(enc, alphas);
-
+ const int do_segments =
+ (enc->segment_hdr_.num_segments_ > 1) ||
+ (enc->method_ <= 2); // for methods 0,1,2, we need preds_[] to be filled.
+ if (do_segments) {
+ int alphas[MAX_ALPHA + 1] = { 0 };
+ VP8EncIterator it;
+
+ VP8IteratorInit(enc, &it);
+ enc->uv_alpha_ = 0;
+ do {
+ VP8IteratorImport(&it);
+ MBAnalyze(&it, alphas, &enc->uv_alpha_);
+ ok = VP8IteratorProgress(&it, 20);
+ // Let's pretend we have perfect lossless reconstruction.
+ } while (ok && VP8IteratorNext(&it, it.yuv_in_));
+ enc->uv_alpha_ /= enc->mb_w_ * enc->mb_h_;
+ if (ok) AssignSegments(enc, alphas);
+ } else { // Use only one default segment.
+ int n;
+ for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
+ DefaultMBInfo(&enc->mb_info_[n]);
+ }
+ // Default susceptibilities.
+ enc->dqm_[0].alpha_ = 0;
+ enc->dqm_[0].beta_ = 0;
+ enc->uv_alpha_ = 0; // we can't compute this one.
+ WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_);
+ }
return ok;
}
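
Worked example for the new susceptibility computation (histogram values
hypothetical): with a largest bin of 50 and a last non-zero bin index of 10,

    // GetAlpha():        alpha = ALPHA_SCALE * last_non_zero / max_value
    //                          = (2 * 255) * 10 / 50 = 102
    // FinalAlphaValue(): MAX_ALPHA - 102 = 255 - 102 = 153
    //                    (already inside [0..MAX_ALPHA], so no clipping)
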
diff --git a/src/enc/backward_references.c b/src/enc/backward_references.c
index b8c8ece8..8ffdd0ce 100644
--- a/src/enc/backward_references.c
+++ b/src/enc/backward_references.c
@@ -141,21 +141,35 @@ static void HashChainInsert(HashChain* const p,
p->hash_to_first_index_[hash_code] = pos;
}
+static void GetParamsForHashChainFindCopy(int quality, int xsize,
+ int* window_size, int* iter_pos,
+ int* iter_limit) {
+ const int iter_mult = (quality < 27) ? 1 : 1 + ((quality - 27) >> 4);
+ // Limit the backward-ref window size for lower qualities.
+ const int max_window_size = (quality > 50) ? WINDOW_SIZE
+ : (quality > 25) ? (xsize << 8)
+ : (xsize << 4);
+ assert(xsize > 0);
+ *window_size = (max_window_size > WINDOW_SIZE) ? WINDOW_SIZE
+ : max_window_size;
+ *iter_pos = 5 + (quality >> 3);
+ *iter_limit = -quality * iter_mult;
+}
+
static int HashChainFindCopy(const HashChain* const p,
- int quality, int index, int xsize,
+ int base_position, int xsize,
const uint32_t* const argb, int maxlen,
+ int window_size, int iter_pos, int iter_limit,
int* const distance_ptr,
int* const length_ptr) {
- const uint64_t hash_code = GetPixPairHash64(&argb[index]);
+ const uint64_t hash_code = GetPixPairHash64(&argb[base_position]);
int prev_length = 0;
int64_t best_val = 0;
int best_length = 0;
int best_distance = 0;
- const uint32_t* const argb_start = argb + index;
- const int iter_min_mult = (quality < 50) ? 2 : (quality < 75) ? 4 : 8;
- const int iter_min = -quality * iter_min_mult;
- int iter_cnt = 10 + (quality >> 1);
- const int min_pos = (index > WINDOW_SIZE) ? index - WINDOW_SIZE : 0;
+ const uint32_t* const argb_start = argb + base_position;
+ const int min_pos =
+ (base_position > window_size) ? base_position - window_size : 0;
int pos;
assert(xsize > 0);
@@ -164,12 +178,12 @@ static int HashChainFindCopy(const HashChain* const p,
pos = p->chain_[pos]) {
int64_t val;
int curr_length;
- if (iter_cnt < 0) {
- if (iter_cnt < iter_min || best_val >= 0xff0000) {
+ if (iter_pos < 0) {
+ if (iter_pos < iter_limit || best_val >= 0xff0000) {
break;
}
}
- --iter_cnt;
+ --iter_pos;
if (best_length != 0 &&
argb[pos + best_length - 1] != argb_start[best_length - 1]) {
continue;
@@ -180,9 +194,9 @@ static int HashChainFindCopy(const HashChain* const p,
}
val = 65536 * curr_length;
// Favoring 2d locality here gives savings for certain images.
- if (index - pos < 9 * xsize) {
- const int y = (index - pos) / xsize;
- int x = (index - pos) % xsize;
+ if (base_position - pos < 9 * xsize) {
+ const int y = (base_position - pos) / xsize;
+ int x = (base_position - pos) % xsize;
if (x > xsize / 2) {
x = xsize - x;
}
@@ -198,7 +212,7 @@ static int HashChainFindCopy(const HashChain* const p,
prev_length = curr_length;
best_val = val;
best_length = curr_length;
- best_distance = index - pos;
+ best_distance = base_position - pos;
if (curr_length >= MAX_LENGTH) {
break;
}
@@ -257,6 +271,9 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
const int pix_count = xsize * ysize;
HashChain* const hash_chain = (HashChain*)malloc(sizeof(*hash_chain));
VP8LColorCache hashers;
+ int window_size = WINDOW_SIZE;
+ int iter_pos = 1;
+ int iter_limit = -1;
if (hash_chain == NULL) return 0;
if (use_color_cache) {
@@ -267,6 +284,8 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
if (!HashChainInit(hash_chain, pix_count)) goto Error;
refs->size = 0;
+ GetParamsForHashChainFindCopy(quality, xsize, &window_size, &iter_pos,
+ &iter_limit);
for (i = 0; i < pix_count; ) {
// Alternative#1: Code the pixels starting at 'i' using backward reference.
int offset = 0;
@@ -276,7 +295,8 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
if (maxlen > MAX_LENGTH) {
maxlen = MAX_LENGTH;
}
- HashChainFindCopy(hash_chain, quality, i, xsize, argb, maxlen,
+ HashChainFindCopy(hash_chain, i, xsize, argb, maxlen,
+ window_size, iter_pos, iter_limit,
&offset, &len);
}
if (len >= MIN_LENGTH) {
@@ -291,8 +311,9 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
if (maxlen > MAX_LENGTH) {
maxlen = MAX_LENGTH;
}
- HashChainFindCopy(hash_chain, quality,
- i + 1, xsize, argb, maxlen, &offset2, &len2);
+ HashChainFindCopy(hash_chain, i + 1, xsize, argb, maxlen,
+ window_size, iter_pos, iter_limit,
+ &offset2, &len2);
if (len2 > len + 1) {
const uint32_t pixel = argb[i];
// Alternative#2 is a better match. So push pixel at 'i' as literal.
@@ -362,7 +383,8 @@ typedef struct {
static int BackwardReferencesTraceBackwards(
int xsize, int ysize, int recursive_cost_model,
- const uint32_t* const argb, int cache_bits, VP8LBackwardRefs* const refs);
+ const uint32_t* const argb, int quality, int cache_bits,
+ VP8LBackwardRefs* const refs);
static void ConvertPopulationCountTableToBitEstimates(
int num_symbols, const int population_counts[], double output[]) {
@@ -387,17 +409,16 @@ static void ConvertPopulationCountTableToBitEstimates(
static int CostModelBuild(CostModel* const m, int xsize, int ysize,
int recursion_level, const uint32_t* const argb,
- int cache_bits) {
+ int quality, int cache_bits) {
int ok = 0;
VP8LHistogram histo;
VP8LBackwardRefs refs;
- const int quality = 100;
if (!VP8LBackwardRefsAlloc(&refs, xsize * ysize)) goto Error;
if (recursion_level > 0) {
if (!BackwardReferencesTraceBackwards(xsize, ysize, recursion_level - 1,
- argb, cache_bits, &refs)) {
+ argb, quality, cache_bits, &refs)) {
goto Error;
}
} else {
@@ -452,20 +473,23 @@ static WEBP_INLINE double GetDistanceCost(const CostModel* const m,
static int BackwardReferencesHashChainDistanceOnly(
int xsize, int ysize, int recursive_cost_model, const uint32_t* const argb,
- int cache_bits, uint32_t* const dist_array) {
+ int quality, int cache_bits, uint32_t* const dist_array) {
int i;
int ok = 0;
int cc_init = 0;
- const int quality = 100;
const int pix_count = xsize * ysize;
const int use_color_cache = (cache_bits > 0);
- double* const cost =
- (double*)WebPSafeMalloc((uint64_t)pix_count, sizeof(*cost));
+ float* const cost =
+ (float*)WebPSafeMalloc((uint64_t)pix_count, sizeof(*cost));
CostModel* cost_model = (CostModel*)malloc(sizeof(*cost_model));
HashChain* hash_chain = (HashChain*)malloc(sizeof(*hash_chain));
VP8LColorCache hashers;
const double mul0 = (recursive_cost_model != 0) ? 1.0 : 0.68;
const double mul1 = (recursive_cost_model != 0) ? 1.0 : 0.82;
+ const int min_distance_code = 2; // TODO(vikasa): tune as function of quality
+ int window_size = WINDOW_SIZE;
+ int iter_pos = 1;
+ int iter_limit = -1;
if (cost == NULL || cost_model == NULL || hash_chain == NULL) goto Error;
@@ -477,15 +501,17 @@ static int BackwardReferencesHashChainDistanceOnly(
}
if (!CostModelBuild(cost_model, xsize, ysize, recursive_cost_model, argb,
- cache_bits)) {
+ quality, cache_bits)) {
goto Error;
}
- for (i = 0; i < pix_count; ++i) cost[i] = 1e100;
+ for (i = 0; i < pix_count; ++i) cost[i] = 1e38f;
// We loop one pixel at a time, but store all currently best points to
// non-processed locations from this point.
dist_array[0] = 0;
+ GetParamsForHashChainFindCopy(quality, xsize, &window_size, &iter_pos,
+ &iter_limit);
for (i = 0; i < pix_count; ++i) {
double prev_cost = 0.0;
int shortmax;
@@ -500,7 +526,8 @@ static int BackwardReferencesHashChainDistanceOnly(
if (maxlen > pix_count - i) {
maxlen = pix_count - i;
}
- HashChainFindCopy(hash_chain, quality, i, xsize, argb, maxlen,
+ HashChainFindCopy(hash_chain, i, xsize, argb, maxlen,
+ window_size, iter_pos, iter_limit,
&offset, &len);
}
if (len >= MIN_LENGTH) {
@@ -509,16 +536,15 @@ static int BackwardReferencesHashChainDistanceOnly(
prev_cost + GetDistanceCost(cost_model, code);
int k;
for (k = 1; k < len; ++k) {
- const double cost_val =
- distance_cost + GetLengthCost(cost_model, k);
+ const double cost_val = distance_cost + GetLengthCost(cost_model, k);
if (cost[i + k] > cost_val) {
- cost[i + k] = cost_val;
+ cost[i + k] = (float)cost_val;
dist_array[i + k] = k + 1;
}
}
// This if is for speedup only. It roughly doubles the speed, and
// makes compression worse by .1 %.
- if (len >= 128 && code < 2) {
+ if (len >= 128 && code <= min_distance_code) {
// Long copy for short distances, let's skip the middle
// lookups for better copies.
// 1) insert the hashes.
@@ -554,7 +580,7 @@ static int BackwardReferencesHashChainDistanceOnly(
cost_val += GetLiteralCost(cost_model, argb[i]) * mul1;
}
if (cost[i] > cost_val) {
- cost[i] = cost_val;
+ cost[i] = (float)cost_val;
dist_array[i] = 1; // only one is inserted.
}
if (use_color_cache) VP8LColorCacheInsert(&hashers, argb[i]);
@@ -572,40 +598,30 @@ Error:
return ok;
}
-static int TraceBackwards(const uint32_t* const dist_array,
- int dist_array_size,
- uint32_t** const chosen_path,
- int* const chosen_path_size) {
- int i;
- // Count how many.
- int count = 0;
- for (i = dist_array_size - 1; i >= 0; ) {
- int k = dist_array[i];
- assert(k >= 1);
- ++count;
- i -= k;
- }
- // Allocate.
- *chosen_path_size = count;
- *chosen_path =
- (uint32_t*)WebPSafeMalloc((uint64_t)count, sizeof(**chosen_path));
- if (*chosen_path == NULL) return 0;
-
- // Write in reverse order.
- for (i = dist_array_size - 1; i >= 0; ) {
- int k = dist_array[i];
- assert(k >= 1);
- (*chosen_path)[--count] = k;
- i -= k;
- }
- return 1;
+// We pack the path at the end of *dist_array and return
+// a pointer to this part of the array. Example:
+// dist_array = [1x2xx3x2] => packed [1x2x1232], chosen_path = [1232]
+static void TraceBackwards(uint32_t* const dist_array,
+ int dist_array_size,
+ uint32_t** const chosen_path,
+ int* const chosen_path_size) {
+ uint32_t* path = dist_array + dist_array_size;
+ uint32_t* cur = dist_array + dist_array_size - 1;
+ while (cur >= dist_array) {
+ const int k = *cur;
+ --path;
+ *path = k;
+ cur -= k;
+ }
+ *chosen_path = path;
+ *chosen_path_size = dist_array + dist_array_size - path;
}
static int BackwardReferencesHashChainFollowChosenPath(
- int xsize, int ysize, const uint32_t* const argb, int cache_bits,
+ int xsize, int ysize, const uint32_t* const argb,
+ int quality, int cache_bits,
const uint32_t* const chosen_path, int chosen_path_size,
VP8LBackwardRefs* const refs) {
- const int quality = 100;
const int pix_count = xsize * ysize;
const int use_color_cache = (cache_bits > 0);
int size = 0;
@@ -614,6 +630,9 @@ static int BackwardReferencesHashChainFollowChosenPath(
int ix;
int ok = 0;
int cc_init = 0;
+ int window_size = WINDOW_SIZE;
+ int iter_pos = 1;
+ int iter_limit = -1;
HashChain* hash_chain = (HashChain*)malloc(sizeof(*hash_chain));
VP8LColorCache hashers;
@@ -626,13 +645,16 @@ static int BackwardReferencesHashChainFollowChosenPath(
}
refs->size = 0;
+ GetParamsForHashChainFindCopy(quality, xsize, &window_size, &iter_pos,
+ &iter_limit);
for (ix = 0; ix < chosen_path_size; ++ix, ++size) {
int offset = 0;
int len = 0;
int maxlen = chosen_path[ix];
if (maxlen != 1) {
- HashChainFindCopy(hash_chain, quality,
- i, xsize, argb, maxlen, &offset, &len);
+ HashChainFindCopy(hash_chain, i, xsize, argb, maxlen,
+ window_size, iter_pos, iter_limit,
+ &offset, &len);
assert(len == maxlen);
refs->refs[size] = PixOrCopyCreateCopy(offset, len);
if (use_color_cache) {
@@ -675,7 +697,7 @@ Error:
static int BackwardReferencesTraceBackwards(int xsize, int ysize,
int recursive_cost_model,
const uint32_t* const argb,
- int cache_bits,
+ int quality, int cache_bits,
VP8LBackwardRefs* const refs) {
int ok = 0;
const int dist_array_size = xsize * ysize;
@@ -687,22 +709,18 @@ static int BackwardReferencesTraceBackwards(int xsize, int ysize,
if (dist_array == NULL) goto Error;
if (!BackwardReferencesHashChainDistanceOnly(
- xsize, ysize, recursive_cost_model, argb, cache_bits, dist_array)) {
- goto Error;
- }
- if (!TraceBackwards(dist_array, dist_array_size,
- &chosen_path, &chosen_path_size)) {
+ xsize, ysize, recursive_cost_model, argb, quality, cache_bits,
+ dist_array)) {
goto Error;
}
- free(dist_array); // no need to retain this memory any longer
- dist_array = NULL;
+ TraceBackwards(dist_array, dist_array_size, &chosen_path, &chosen_path_size);
if (!BackwardReferencesHashChainFollowChosenPath(
- xsize, ysize, argb, cache_bits, chosen_path, chosen_path_size, refs)) {
+ xsize, ysize, argb, quality, cache_bits, chosen_path, chosen_path_size,
+ refs)) {
goto Error;
}
ok = 1;
Error:
- free(chosen_path);
free(dist_array);
return ok;
}
@@ -762,8 +780,8 @@ int VP8LGetBackwardReferences(int width, int height,
// Choose appropriate backward reference.
if (lz77_is_useful) {
- // TraceBackwards is costly. Run it for higher qualities.
- const int try_lz77_trace_backwards = (quality >= 75);
+ // TraceBackwards is costly. Don't execute it at lower quality (q <= 10).
+ const int try_lz77_trace_backwards = (quality > 10);
*best = refs_lz77; // default guess: lz77 is better
VP8LClearBackwardRefs(&refs_rle);
if (try_lz77_trace_backwards) {
@@ -772,8 +790,8 @@ int VP8LGetBackwardReferences(int width, int height,
if (!VP8LBackwardRefsAlloc(&refs_trace, num_pix)) {
goto End;
}
- if (BackwardReferencesTraceBackwards(
- width, height, recursion_level, argb, cache_bits, &refs_trace)) {
+ if (BackwardReferencesTraceBackwards(width, height, recursion_level, argb,
+ quality, cache_bits, &refs_trace)) {
VP8LClearBackwardRefs(&refs_lz77);
*best = refs_trace;
}
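
The rewritten TraceBackwards() avoids the extra allocation by packing the
chosen path in place at the tail of dist_array; a standalone sketch of the
same walk (names hypothetical):

    #include <stdint.h>

    // Walk dist_array backwards, hopping 'k' entries at a time, and pack
    // the visited k's at the tail of the same array. Every visited entry
    // holds a step k >= 1.
    // E.g. {1,?,2,?,?,3,?,2} (size 8) packs a tail of {1,2,3,2}.
    static void PackPath(uint32_t* dist_array, int size,
                         uint32_t** path, int* path_size) {
      uint32_t* p = dist_array + size;
      uint32_t* cur = dist_array + size - 1;
      while (cur >= dist_array) {
        const uint32_t k = *cur;
        *--p = k;
        cur -= k;
      }
      *path = p;
      *path_size = (int)(dist_array + size - p);
    }
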
diff --git a/src/enc/backward_references.h b/src/enc/backward_references.h
index cda7c2b1..54628514 100644
--- a/src/enc/backward_references.h
+++ b/src/enc/backward_references.h
@@ -65,11 +65,11 @@ static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
#endif
static WEBP_INLINE int VP8LBitsLog2Ceiling(uint32_t n) {
- const int floor = BitsLog2Floor(n);
+ const int log_floor = BitsLog2Floor(n);
if (n == (n & ~(n - 1))) // zero or a power of two.
- return floor;
+ return log_floor;
else
- return floor + 1;
+ return log_floor + 1;
}
// Splitting of distance and length codes into prefixes and
diff --git a/src/enc/frame.c b/src/enc/frame.c
index bdd36006..dd345ed0 100644
--- a/src/enc/frame.c
+++ b/src/enc/frame.c
@@ -45,10 +45,10 @@ const uint8_t VP8EncBands[16 + 1] = {
0 // sentinel
};
-static const uint8_t kCat3[] = { 173, 148, 140 };
-static const uint8_t kCat4[] = { 176, 155, 140, 135 };
-static const uint8_t kCat5[] = { 180, 157, 141, 134, 130 };
-static const uint8_t kCat6[] =
+const uint8_t VP8Cat3[] = { 173, 148, 140 };
+const uint8_t VP8Cat4[] = { 176, 155, 140, 135 };
+const uint8_t VP8Cat5[] = { 180, 157, 141, 134, 130 };
+const uint8_t VP8Cat6[] =
{ 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129 };
//------------------------------------------------------------------------------
@@ -113,7 +113,8 @@ static int Record(int bit, proba_t* const stats) {
// Note: no need to record the fixed probas.
static int RecordCoeffs(int ctx, const VP8Residual* const res) {
int n = res->first;
- proba_t* s = res->stats[VP8EncBands[n]][ctx];
+ // should be stats[VP8EncBands[n]], but it's equivalent for n=0 or 1
+ proba_t* s = res->stats[n][ctx];
if (res->last < 0) {
Record(0, s + 0);
return 0;
@@ -212,6 +213,47 @@ static int FinalizeTokenProbas(VP8Encoder* const enc) {
}
//------------------------------------------------------------------------------
+// Finalize Segment probability based on the coding tree
+
+static int GetProba(int a, int b) {
+ const int total = a + b;
+ return (total == 0) ? 255 // that's the default probability.
+ : (255 * a + total / 2) / total; // rounded proba
+}
+
+static void SetSegmentProbas(VP8Encoder* const enc) {
+ int p[NUM_MB_SEGMENTS] = { 0 };
+ int n;
+
+ for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
+ const VP8MBInfo* const mb = &enc->mb_info_[n];
+ p[mb->segment_]++;
+ }
+ if (enc->pic_->stats != NULL) {
+ for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
+ enc->pic_->stats->segment_size[n] = p[n];
+ }
+ }
+ if (enc->segment_hdr_.num_segments_ > 1) {
+ uint8_t* const probas = enc->proba_.segments_;
+ probas[0] = GetProba(p[0] + p[1], p[2] + p[3]);
+ probas[1] = GetProba(p[0], p[1]);
+ probas[2] = GetProba(p[2], p[3]);
+
+ enc->segment_hdr_.update_map_ =
+ (probas[0] != 255) || (probas[1] != 255) || (probas[2] != 255);
+ enc->segment_hdr_.size_ =
+ p[0] * (VP8BitCost(0, probas[0]) + VP8BitCost(0, probas[1])) +
+ p[1] * (VP8BitCost(0, probas[0]) + VP8BitCost(1, probas[1])) +
+ p[2] * (VP8BitCost(1, probas[0]) + VP8BitCost(0, probas[2])) +
+ p[3] * (VP8BitCost(1, probas[0]) + VP8BitCost(1, probas[2]));
+ } else {
+ enc->segment_hdr_.update_map_ = 0;
+ enc->segment_hdr_.size_ = 0;
+ }
+}
+
+//------------------------------------------------------------------------------
// helper functions for residuals struct VP8Residual.
static void InitResidual(int first, int coeff_type,
@@ -239,18 +281,19 @@ static void SetResidualCoeffs(const int16_t* const coeffs,
//------------------------------------------------------------------------------
// Mode costs
-static int GetResidualCost(int ctx, const VP8Residual* const res) {
+static int GetResidualCost(int ctx0, const VP8Residual* const res) {
int n = res->first;
- int p0 = res->prob[VP8EncBands[n]][ctx][0];
- const uint16_t* t = res->cost[VP8EncBands[n]][ctx];
+ // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1
+ int p0 = res->prob[n][ctx0][0];
+ const uint16_t* t = res->cost[n][ctx0];
int cost;
if (res->last < 0) {
return VP8BitCost(0, p0);
}
cost = 0;
- while (n <= res->last) {
- const int v = res->coeffs[n];
+ while (n < res->last) {
+ int v = res->coeffs[n];
const int b = VP8EncBands[n + 1];
++n;
if (v == 0) {
@@ -259,19 +302,28 @@ static int GetResidualCost(int ctx, const VP8Residual* const res) {
t = res->cost[b][0];
continue;
}
+ v = abs(v);
cost += VP8BitCost(1, p0);
- if (2u >= (unsigned int)(v + 1)) { // v = -1 or 1
- // short-case for "VP8LevelCost(t, 1)" (256 is VP8LevelFixedCosts[1]):
- cost += 256 + t[1];
- p0 = res->prob[b][1][0];
- t = res->cost[b][1];
- } else {
- cost += VP8LevelCost(t, abs(v));
- p0 = res->prob[b][2][0];
- t = res->cost[b][2];
+ cost += VP8LevelCost(t, v);
+ {
+ const int ctx = (v == 1) ? 1 : 2;
+ p0 = res->prob[b][ctx][0];
+ t = res->cost[b][ctx];
+ }
+ }
+ // Last coefficient is always non-zero
+ {
+ const int v = abs(res->coeffs[n]);
+ assert(v != 0);
+ cost += VP8BitCost(1, p0);
+ cost += VP8LevelCost(t, v);
+ if (n < 15) {
+ const int b = VP8EncBands[n + 1];
+ const int ctx = (v == 1) ? 1 : 2;
+ const int last_p0 = res->prob[b][ctx][0];
+ cost += VP8BitCost(0, last_p0);
}
}
- if (n < 16) cost += VP8BitCost(0, p0);
return cost;
}
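
The restructured cost loop relies on res->coeffs[res->last] being non-zero,
so the final coefficient is costed outside the loop; the context update it
uses reduces to a simple mapping (sketch, names hypothetical):

    // Next-token context after coding a coefficient of magnitude v:
    // 0 after a zero, 1 after a one, 2 after anything larger.
    static int NextCtx(int v) {
      return (v == 0) ? 0 : (v == 1) ? 1 : 2;
    }
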
@@ -342,7 +394,8 @@ int VP8GetCostUV(VP8EncIterator* const it, const VP8ModeScore* const rd) {
static int PutCoeffs(VP8BitWriter* const bw, int ctx, const VP8Residual* res) {
int n = res->first;
- const uint8_t* p = res->prob[VP8EncBands[n]][ctx];
+ // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1
+ const uint8_t* p = res->prob[n][ctx];
if (!VP8PutBit(bw, res->last >= 0, p[0])) {
return 0;
}
@@ -371,30 +424,30 @@ static int PutCoeffs(VP8BitWriter* const bw, int ctx, const VP8Residual* res) {
} else {
int mask;
const uint8_t* tab;
- if (v < 3 + (8 << 1)) { // kCat3 (3b)
+ if (v < 3 + (8 << 1)) { // VP8Cat3 (3b)
VP8PutBit(bw, 0, p[8]);
VP8PutBit(bw, 0, p[9]);
v -= 3 + (8 << 0);
mask = 1 << 2;
- tab = kCat3;
- } else if (v < 3 + (8 << 2)) { // kCat4 (4b)
+ tab = VP8Cat3;
+ } else if (v < 3 + (8 << 2)) { // VP8Cat4 (4b)
VP8PutBit(bw, 0, p[8]);
VP8PutBit(bw, 1, p[9]);
v -= 3 + (8 << 1);
mask = 1 << 3;
- tab = kCat4;
- } else if (v < 3 + (8 << 3)) { // kCat5 (5b)
+ tab = VP8Cat4;
+ } else if (v < 3 + (8 << 3)) { // VP8Cat5 (5b)
VP8PutBit(bw, 1, p[8]);
VP8PutBit(bw, 0, p[10]);
v -= 3 + (8 << 2);
mask = 1 << 4;
- tab = kCat5;
- } else { // kCat6 (11b)
+ tab = VP8Cat5;
+ } else { // VP8Cat6 (11b)
VP8PutBit(bw, 1, p[8]);
VP8PutBit(bw, 1, p[10]);
v -= 3 + (8 << 3);
mask = 1 << 10;
- tab = kCat6;
+ tab = VP8Cat6;
}
while (mask) {
VP8PutBit(bw, !!(v & mask), *tab++);
@@ -514,134 +567,8 @@ static void RecordResiduals(VP8EncIterator* const it,
#ifdef USE_TOKEN_BUFFER
-void VP8TBufferInit(VP8TBuffer* const b) {
- b->rows_ = NULL;
- b->tokens_ = NULL;
- b->last_ = &b->rows_;
- b->left_ = 0;
- b->error_ = 0;
-}
-
-int VP8TBufferNewPage(VP8TBuffer* const b) {
- VP8Tokens* const page = b->error_ ? NULL : (VP8Tokens*)malloc(sizeof(*page));
- if (page == NULL) {
- b->error_ = 1;
- return 0;
- }
- *b->last_ = page;
- b->last_ = &page->next_;
- b->left_ = MAX_NUM_TOKEN;
- b->tokens_ = page->tokens_;
- return 1;
-}
-
-void VP8TBufferClear(VP8TBuffer* const b) {
- if (b != NULL) {
- const VP8Tokens* p = b->rows_;
- while (p != NULL) {
- const VP8Tokens* const next = p->next_;
- free((void*)p);
- p = next;
- }
- VP8TBufferInit(b);
- }
-}
-
-int VP8EmitTokens(const VP8TBuffer* const b, VP8BitWriter* const bw,
- const uint8_t* const probas) {
- VP8Tokens* p = b->rows_;
- if (b->error_) return 0;
- while (p != NULL) {
- const int N = (p->next_ == NULL) ? b->left_ : 0;
- int n = MAX_NUM_TOKEN;
- while (n-- > N) {
- VP8PutBit(bw, (p->tokens_[n] >> 15) & 1, probas[p->tokens_[n] & 0x7fff]);
- }
- p = p->next_;
- }
- return 1;
-}
-
-#define TOKEN_ID(b, ctx, p) ((p) + NUM_PROBAS * ((ctx) + (b) * NUM_CTX))
-
-static int RecordCoeffTokens(int ctx, const VP8Residual* const res,
- VP8TBuffer* tokens) {
- int n = res->first;
- int b = VP8EncBands[n];
- if (!VP8AddToken(tokens, res->last >= 0, TOKEN_ID(b, ctx, 0))) {
- return 0;
- }
-
- while (n < 16) {
- const int c = res->coeffs[n++];
- const int sign = c < 0;
- int v = sign ? -c : c;
- const int base_id = TOKEN_ID(b, ctx, 0);
- if (!VP8AddToken(tokens, v != 0, base_id + 1)) {
- b = VP8EncBands[n];
- ctx = 0;
- continue;
- }
- if (!VP8AddToken(tokens, v > 1, base_id + 2)) {
- b = VP8EncBands[n];
- ctx = 1;
- } else {
- if (!VP8AddToken(tokens, v > 4, base_id + 3)) {
- if (VP8AddToken(tokens, v != 2, base_id + 4))
- VP8AddToken(tokens, v == 4, base_id + 5);
- } else if (!VP8AddToken(tokens, v > 10, base_id + 6)) {
- if (!VP8AddToken(tokens, v > 6, base_id + 7)) {
-// VP8AddToken(tokens, v == 6, 159);
- } else {
-// VP8AddToken(tokens, v >= 9, 165);
-// VP8AddToken(tokens, !(v & 1), 145);
- }
- } else {
- int mask;
- const uint8_t* tab;
- if (v < 3 + (8 << 1)) { // kCat3 (3b)
- VP8AddToken(tokens, 0, base_id + 8);
- VP8AddToken(tokens, 0, base_id + 9);
- v -= 3 + (8 << 0);
- mask = 1 << 2;
- tab = kCat3;
- } else if (v < 3 + (8 << 2)) { // kCat4 (4b)
- VP8AddToken(tokens, 0, base_id + 8);
- VP8AddToken(tokens, 1, base_id + 9);
- v -= 3 + (8 << 1);
- mask = 1 << 3;
- tab = kCat4;
- } else if (v < 3 + (8 << 3)) { // kCat5 (5b)
- VP8AddToken(tokens, 1, base_id + 8);
- VP8AddToken(tokens, 0, base_id + 10);
- v -= 3 + (8 << 2);
- mask = 1 << 4;
- tab = kCat5;
- } else { // kCat6 (11b)
- VP8AddToken(tokens, 1, base_id + 8);
- VP8AddToken(tokens, 1, base_id + 10);
- v -= 3 + (8 << 3);
- mask = 1 << 10;
- tab = kCat6;
- }
- while (mask) {
- // VP8AddToken(tokens, !!(v & mask), *tab++);
- mask >>= 1;
- }
- }
- ctx = 2;
- }
- b = VP8EncBands[n];
- // VP8PutBitUniform(bw, sign);
- if (n == 16 || !VP8AddToken(tokens, n <= res->last, TOKEN_ID(b, ctx, 0))) {
- return 1; // EOB
- }
- }
- return 1;
-}
-
-static void RecordTokens(VP8EncIterator* const it,
- const VP8ModeScore* const rd, VP8TBuffer tokens[2]) {
+static void RecordTokens(VP8EncIterator* const it, const VP8ModeScore* const rd,
+ VP8TBuffer* const tokens) {
int x, y, ch;
VP8Residual res;
VP8Encoder* const enc = it->enc_;
@@ -651,7 +578,8 @@ static void RecordTokens(VP8EncIterator* const it,
InitResidual(0, 1, enc, &res);
SetResidualCoeffs(rd->y_dc_levels, &res);
// TODO(skal): FIX -> it->top_nz_[8] = it->left_nz_[8] =
- RecordCoeffTokens(it->top_nz_[8] + it->left_nz_[8], &res, &tokens[0]);
+ VP8RecordCoeffTokens(it->top_nz_[8] + it->left_nz_[8],
+ res.first, res.last, res.coeffs, tokens);
InitResidual(1, 0, enc, &res);
} else {
InitResidual(0, 3, enc, &res);
@@ -663,7 +591,7 @@ static void RecordTokens(VP8EncIterator* const it,
const int ctx = it->top_nz_[x] + it->left_nz_[y];
SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res);
it->top_nz_[x] = it->left_nz_[y] =
- RecordCoeffTokens(ctx, &res, &tokens[0]);
+ VP8RecordCoeffTokens(ctx, res.first, res.last, res.coeffs, tokens);
}
}
@@ -675,7 +603,7 @@ static void RecordTokens(VP8EncIterator* const it,
const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res);
it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] =
- RecordCoeffTokens(ctx, &res, &tokens[1]);
+ VP8RecordCoeffTokens(ctx, res.first, res.last, res.coeffs, tokens);
}
}
}
@@ -736,6 +664,7 @@ static void StoreSideInfo(const VP8EncIterator* const it) {
const int b = (int)((it->luma_bits_ + it->uv_bits_ + 7) >> 3);
*info = (b > 255) ? 255 : b; break;
}
+ case 7: *info = mb->alpha_; break;
default: *info = 0; break;
};
}
@@ -848,6 +777,7 @@ static int OneStatPass(VP8Encoder* const enc, float q, int rd_opt, int nb_mbs,
}
VP8SetSegmentParams(enc, q); // setup segment quantizations and filters
+ SetSegmentProbas(enc); // compute segment probabilities
ResetStats(enc);
ResetTokenStats(enc);
@@ -915,7 +845,7 @@ int VP8StatLoop(VP8Encoder* const enc) {
#if DEBUG_SEARCH
printf("#%d size=%d PSNR=%.2f q=%.2f\n", pass, size, PSNR, q);
#endif
- if (!size) return 0;
+ if (size == 0) return 0;
if (enc->config_->target_PSNR > 0) {
criterion = (PSNR < enc->config_->target_PSNR);
} else {
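
The segment id of each macroblock is coded with a two-level binary tree: one
bit chooses between segments {0,1} and {2,3}, a second bit picks within the
pair. SetSegmentProbas() above derives the three node probabilities from the
per-segment macroblock counts. A standalone sketch of that derivation (the
counts are a worked example; in the encoder the results land in
enc->proba_.segments_):

    #include <stdio.h>

    // Probability (scaled to [0,255], rounded) that the next symbol falls in
    // the 'a' branch; 255 is the default when no counts are available yet.
    static int GetProba(int a, int b) {
      const int total = a + b;
      return (total == 0) ? 255 : (255 * a + total / 2) / total;
    }

    int main(void) {
      const int p[4] = { 120, 30, 40, 10 };  // per-segment macroblock counts
      const int root  = GetProba(p[0] + p[1], p[2] + p[3]);  // {0,1} vs {2,3}
      const int left  = GetProba(p[0], p[1]);                // 0 vs 1
      const int right = GetProba(p[2], p[3]);                // 2 vs 3
      printf("tree probas: %d %d %d\n", root, left, right);  // 191 204 204
      return 0;
    }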
diff --git a/src/enc/histogram.c b/src/enc/histogram.c
index ca838e06..c5b84bf7 100644
--- a/src/enc/histogram.c
+++ b/src/enc/histogram.c
@@ -55,9 +55,9 @@ VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) {
int i;
VP8LHistogramSet* set;
VP8LHistogram* bulk;
- const uint64_t total_size = (uint64_t)sizeof(*set)
- + size * sizeof(*set->histograms)
- + size * sizeof(**set->histograms);
+ const uint64_t total_size = sizeof(*set)
+ + (uint64_t)size * sizeof(*set->histograms)
+ + (uint64_t)size * sizeof(**set->histograms);
uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
if (memory == NULL) return NULL;
@@ -249,14 +249,15 @@ static uint32_t MyRand(uint32_t *seed) {
}
static int HistogramCombine(const VP8LHistogramSet* const in,
- VP8LHistogramSet* const out, int num_pairs) {
+ VP8LHistogramSet* const out, int iter_mult,
+ int num_pairs, int num_tries_no_success) {
int ok = 0;
int i, iter;
uint32_t seed = 0;
int tries_with_no_success = 0;
- const int min_cluster_size = 2;
int out_size = in->size;
- const int outer_iters = in->size * 3;
+ const int outer_iters = in->size * iter_mult;
+ const int min_cluster_size = 2;
VP8LHistogram* const histos = (VP8LHistogram*)malloc(2 * sizeof(*histos));
VP8LHistogram* cur_combo = histos + 0; // trial merged histogram
VP8LHistogram* best_combo = histos + 1; // best merged histogram so far
@@ -271,12 +272,12 @@ static int HistogramCombine(const VP8LHistogramSet* const in,
// Collapse similar histograms in 'out'.
for (iter = 0; iter < outer_iters && out_size >= min_cluster_size; ++iter) {
- // We pick the best pair to be combined out of 'inner_iters' pairs.
double best_cost_diff = 0.;
int best_idx1 = 0, best_idx2 = 1;
int j;
+ const int num_tries = (num_pairs < out_size) ? num_pairs : out_size;
seed += iter;
- for (j = 0; j < num_pairs; ++j) {
+ for (j = 0; j < num_tries; ++j) {
double curr_cost_diff;
// Choose two histograms at random and try to combine them.
const uint32_t idx1 = MyRand(&seed) % out_size;
@@ -315,7 +316,7 @@ static int HistogramCombine(const VP8LHistogramSet* const in,
}
tries_with_no_success = 0;
}
- if (++tries_with_no_success >= 50) {
+ if (++tries_with_no_success >= num_tries_no_success) {
break;
}
}
@@ -384,16 +385,27 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
int ok = 0;
const int histo_xsize = histo_bits ? VP8LSubSampleSize(xsize, histo_bits) : 1;
const int histo_ysize = histo_bits ? VP8LSubSampleSize(ysize, histo_bits) : 1;
- const int num_histo_pairs = 10 + quality / 2; // For HistogramCombine().
const int histo_image_raw_size = histo_xsize * histo_ysize;
+
+ // Heuristic params for HistogramCombine().
+ const int num_tries_no_success = 8 + (quality >> 1);
+ const int iter_mult = (quality < 27) ? 1 : 1 + ((quality - 27) >> 4);
+ int num_pairs = (quality >> 1);
+
VP8LHistogramSet* const image_out =
VP8LAllocateHistogramSet(histo_image_raw_size, cache_bits);
if (image_out == NULL) return 0;
+ if (num_pairs > (histo_image_raw_size >> 2)) {
+ num_pairs = histo_image_raw_size >> 2;
+ }
+ num_pairs += 10;
+
// Build histogram image.
HistogramBuildImage(xsize, histo_bits, refs, image_out);
// Collapse similar histograms.
- if (!HistogramCombine(image_out, image_in, num_histo_pairs)) {
+ if (!HistogramCombine(image_out, image_in, iter_mult, num_pairs,
+ num_tries_no_success)) {
goto Error;
}
// Find the optimal map from original histograms to the final ones.
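
HistogramCombine() now takes its search effort from 'quality' instead of a
fixed schedule: more outer iterations (iter_mult), more random pairs probed
per iteration (num_pairs, capped for small histogram images), and a longer
patience window before bailing out (num_tries_no_success). A sketch of the
parameter derivation, following the hunks above:

    // Heuristic search parameters for the stochastic histogram merging,
    // derived from quality (0..100) and the histogram image size.
    static void GetCombineParams(int quality, int histo_image_raw_size,
                                 int* const iter_mult, int* const num_pairs,
                                 int* const num_tries_no_success) {
      *num_tries_no_success = 8 + (quality >> 1);   // patience window
      *iter_mult = (quality < 27) ? 1 : 1 + ((quality - 27) >> 4);
      *num_pairs = quality >> 1;                    // random pairs per iteration
      if (*num_pairs > (histo_image_raw_size >> 2)) {
        *num_pairs = histo_image_raw_size >> 2;     // cap for small images
      }
      *num_pairs += 10;
    }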
diff --git a/src/enc/picture.c b/src/enc/picture.c
index 44eed060..739a7aa2 100644
--- a/src/enc/picture.c
+++ b/src/enc/picture.c
@@ -290,8 +290,11 @@ int WebPPictureView(const WebPPicture* src,
dst->y = src->y + top * src->y_stride + left;
dst->u = src->u + (top >> 1) * src->uv_stride + (left >> 1);
dst->v = src->v + (top >> 1) * src->uv_stride + (left >> 1);
+ dst->y_stride = src->y_stride;
+ dst->uv_stride = src->uv_stride;
if (src->a != NULL) {
dst->a = src->a + top * src->a_stride + left;
+ dst->a_stride = src->a_stride;
}
#ifdef WEBP_EXPERIMENTAL_FEATURES
if (src->u0 != NULL) {
@@ -299,10 +302,12 @@ int WebPPictureView(const WebPPicture* src,
IS_YUV_CSP(dst->colorspace, WEBP_YUV422) ? (left >> 1) : left;
dst->u0 = src->u0 + top * src->uv0_stride + left_pos;
dst->v0 = src->v0 + top * src->uv0_stride + left_pos;
+ dst->uv0_stride = src->uv0_stride;
}
#endif
} else {
dst->argb = src->argb + top * src->argb_stride + left;
+ dst->argb_stride = src->argb_stride;
}
return 1;
}
@@ -801,11 +806,11 @@ int WebPPictureYUVAToARGB(WebPPicture* picture) {
// Insert alpha values if needed, in replacement for the default 0xff ones.
if (picture->colorspace & WEBP_CSP_ALPHA_BIT) {
for (y = 0; y < height; ++y) {
- uint32_t* const dst = picture->argb + y * picture->argb_stride;
+ uint32_t* const argb_dst = picture->argb + y * picture->argb_stride;
const uint8_t* const src = picture->a + y * picture->a_stride;
int x;
for (x = 0; x < width; ++x) {
- dst[x] = (dst[x] & 0x00ffffffu) | (src[x] << 24);
+ argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | (src[x] << 24);
}
}
}
@@ -906,67 +911,135 @@ void WebPCleanupTransparentArea(WebPPicture* pic) {
#undef SIZE
#undef SIZE2
+//------------------------------------------------------------------------------
+// local-min distortion
+//
+// For every pixel in the *reference* picture, we search for the local best
+// match in the compressed image. This is not a symmetrical measure.
+
+// Search radius; it shouldn't be too large.
+#define RADIUS 2
+
+static float AccumulateLSIM(const uint8_t* src, int src_stride,
+ const uint8_t* ref, int ref_stride,
+ int w, int h) {
+ int x, y;
+ double total_sse = 0.;
+ for (y = 0; y < h; ++y) {
+ const int y_0 = (y - RADIUS < 0) ? 0 : y - RADIUS;
+ const int y_1 = (y + RADIUS + 1 >= h) ? h : y + RADIUS + 1;
+ for (x = 0; x < w; ++x) {
+ const int x_0 = (x - RADIUS < 0) ? 0 : x - RADIUS;
+ const int x_1 = (x + RADIUS + 1 >= w) ? w : x + RADIUS + 1;
+ double best_sse = 255. * 255.;
+ const double value = (double)ref[y * ref_stride + x];
+ int i, j;
+ for (j = y_0; j < y_1; ++j) {
+ const uint8_t* s = src + j * src_stride;
+ for (i = x_0; i < x_1; ++i) {
+ const double sse = (double)(s[i] - value) * (s[i] - value);
+ if (sse < best_sse) best_sse = sse;
+ }
+ }
+ total_sse += best_sse;
+ }
+ }
+ return (float)total_sse;
+}
+#undef RADIUS
//------------------------------------------------------------------------------
// Distortion
// Max value returned in case of exact similarity.
static const double kMinDistortion_dB = 99.;
+static float GetPSNR(const double v) {
+ return (float)((v > 0.) ? -4.3429448 * log(v / (255 * 255.))
+ : kMinDistortion_dB);
+}
-int WebPPictureDistortion(const WebPPicture* pic1, const WebPPicture* pic2,
+int WebPPictureDistortion(const WebPPicture* src, const WebPPicture* ref,
int type, float result[5]) {
- int c;
DistoStats stats[5];
int has_alpha;
+ int uv_w, uv_h;
- if (pic1 == NULL || pic2 == NULL ||
- pic1->width != pic2->width || pic1->height != pic2->height ||
- pic1->y == NULL || pic2->y == NULL ||
- pic1->u == NULL || pic2->u == NULL ||
- pic1->v == NULL || pic2->v == NULL ||
+ if (src == NULL || ref == NULL ||
+ src->width != ref->width || src->height != ref->height ||
+ src->y == NULL || ref->y == NULL ||
+ src->u == NULL || ref->u == NULL ||
+ src->v == NULL || ref->v == NULL ||
result == NULL) {
return 0;
}
// TODO(skal): provide distortion for ARGB too.
- if (pic1->use_argb == 1 || pic1->use_argb != pic2->use_argb) {
+ if (src->use_argb == 1 || src->use_argb != ref->use_argb) {
return 0;
}
- has_alpha = !!(pic1->colorspace & WEBP_CSP_ALPHA_BIT);
- if (has_alpha != !!(pic2->colorspace & WEBP_CSP_ALPHA_BIT) ||
- (has_alpha && (pic1->a == NULL || pic2->a == NULL))) {
+ has_alpha = !!(src->colorspace & WEBP_CSP_ALPHA_BIT);
+ if (has_alpha != !!(ref->colorspace & WEBP_CSP_ALPHA_BIT) ||
+ (has_alpha && (src->a == NULL || ref->a == NULL))) {
return 0;
}
memset(stats, 0, sizeof(stats));
- VP8SSIMAccumulatePlane(pic1->y, pic1->y_stride,
- pic2->y, pic2->y_stride,
- pic1->width, pic1->height, &stats[0]);
- VP8SSIMAccumulatePlane(pic1->u, pic1->uv_stride,
- pic2->u, pic2->uv_stride,
- (pic1->width + 1) >> 1, (pic1->height + 1) >> 1,
- &stats[1]);
- VP8SSIMAccumulatePlane(pic1->v, pic1->uv_stride,
- pic2->v, pic2->uv_stride,
- (pic1->width + 1) >> 1, (pic1->height + 1) >> 1,
- &stats[2]);
- if (has_alpha) {
- VP8SSIMAccumulatePlane(pic1->a, pic1->a_stride,
- pic2->a, pic2->a_stride,
- pic1->width, pic1->height, &stats[3]);
- }
- for (c = 0; c <= 4; ++c) {
- if (type == 1) {
- const double v = VP8SSIMGet(&stats[c]);
- result[c] = (float)((v < 1.) ? -10.0 * log10(1. - v)
- : kMinDistortion_dB);
- } else {
- const double v = VP8SSIMGetSquaredError(&stats[c]);
- result[c] = (float)((v > 0.) ? -4.3429448 * log(v / (255 * 255.))
- : kMinDistortion_dB);
+
+ uv_w = HALVE(src->width);
+ uv_h = HALVE(src->height);
+ if (type >= 2) {
+ float sse[4];
+ sse[0] = AccumulateLSIM(src->y, src->y_stride,
+ ref->y, ref->y_stride, src->width, src->height);
+ sse[1] = AccumulateLSIM(src->u, src->uv_stride,
+ ref->u, ref->uv_stride, uv_w, uv_h);
+ sse[2] = AccumulateLSIM(src->v, src->uv_stride,
+ ref->v, ref->uv_stride, uv_w, uv_h);
+ sse[3] = has_alpha ? AccumulateLSIM(src->a, src->a_stride,
+ ref->a, ref->a_stride,
+ src->width, src->height)
+ : 0.f;
+ result[0] = GetPSNR(sse[0] / (src->width * src->height));
+ result[1] = GetPSNR(sse[1] / (uv_w * uv_h));
+ result[2] = GetPSNR(sse[2] / (uv_w * uv_h));
+ result[3] = GetPSNR(sse[3] / (src->width * src->height));
+ {
+ double total_sse = sse[0] + sse[1] + sse[2];
+ int total_pixels = src->width * src->height + 2 * uv_w * uv_h;
+ if (has_alpha) {
+ total_pixels += src->width * src->height;
+ total_sse += sse[3];
+ }
+ result[4] = GetPSNR(total_sse / total_pixels);
+ }
+ } else {
+ int c;
+ VP8SSIMAccumulatePlane(src->y, src->y_stride,
+ ref->y, ref->y_stride,
+ src->width, src->height, &stats[0]);
+ VP8SSIMAccumulatePlane(src->u, src->uv_stride,
+ ref->u, ref->uv_stride,
+ uv_w, uv_h, &stats[1]);
+ VP8SSIMAccumulatePlane(src->v, src->uv_stride,
+ ref->v, ref->uv_stride,
+ uv_w, uv_h, &stats[2]);
+ if (has_alpha) {
+ VP8SSIMAccumulatePlane(src->a, src->a_stride,
+ ref->a, ref->a_stride,
+ src->width, src->height, &stats[3]);
+ }
+ for (c = 0; c <= 4; ++c) {
+ if (type == 1) {
+ const double v = VP8SSIMGet(&stats[c]);
+ result[c] = (float)((v < 1.) ? -10.0 * log10(1. - v)
+ : kMinDistortion_dB);
+ } else {
+ const double v = VP8SSIMGetSquaredError(&stats[c]);
+ result[c] = GetPSNR(v);
+ }
+ // Accumulate forward
+ if (c < 4) VP8SSIMAddStats(&stats[c], &stats[4]);
}
- // Accumulate forward
- if (c < 4) VP8SSIMAddStats(&stats[c], &stats[4]);
}
return 1;
}
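
GetPSNR() above folds the familiar 10 * log10(255^2 / mse) into a single
natural logarithm: 10 / ln(10) is approximately 4.3429448, so
-4.3429448 * ln(mse / 255^2) yields the same PSNR in dB. A quick numeric
check of the equivalence (link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      const double mse = 42.5;  // arbitrary mean squared error
      const double a = 10. * log10(255. * 255. / mse);
      const double b = -4.3429448 * log(mse / (255. * 255.));
      printf("%.4f dB vs %.4f dB\n", a, b);  // both ~31.85 dB
      return 0;
    }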
diff --git a/src/enc/quant.c b/src/enc/quant.c
index ea153849..b5d2d94c 100644
--- a/src/enc/quant.c
+++ b/src/enc/quant.c
@@ -229,10 +229,50 @@ static double QualityToCompression(double q) {
return (c < 0.75) ? c * (2. / 3.) : 2. * c - 1.;
}
+static int SegmentsAreEquivalent(const VP8SegmentInfo* const S1,
+ const VP8SegmentInfo* const S2) {
+ return (S1->quant_ == S2->quant_) && (S1->fstrength_ == S2->fstrength_);
+}
+
+static void SimplifySegments(VP8Encoder* const enc) {
+ int map[NUM_MB_SEGMENTS] = { 0, 1, 2, 3 };
+ const int num_segments = enc->segment_hdr_.num_segments_;
+ int num_final_segments = 1;
+ int s1, s2;
+ for (s1 = 1; s1 < num_segments; ++s1) { // find similar segments
+ const VP8SegmentInfo* const S1 = &enc->dqm_[s1];
+ int found = 0;
+ // check if we already have similar segment
+ for (s2 = 0; s2 < num_final_segments; ++s2) {
+ const VP8SegmentInfo* const S2 = &enc->dqm_[s2];
+ if (SegmentsAreEquivalent(S1, S2)) {
+ found = 1;
+ break;
+ }
+ }
+ map[s1] = s2;
+ if (!found) {
+ if (num_final_segments != s1) {
+ enc->dqm_[num_final_segments] = enc->dqm_[s1];
+ }
+ ++num_final_segments;
+ }
+ }
+ if (num_final_segments < num_segments) { // Remap
+ int i = enc->mb_w_ * enc->mb_h_;
+ while (i-- > 0) enc->mb_info_[i].segment_ = map[enc->mb_info_[i].segment_];
+ enc->segment_hdr_.num_segments_ = num_final_segments;
+ // Replicate the trailing segment infos (mostly cosmetic).
+ for (i = num_final_segments; i < num_segments; ++i) {
+ enc->dqm_[i] = enc->dqm_[num_final_segments - 1];
+ }
+ }
+}
+
void VP8SetSegmentParams(VP8Encoder* const enc, float quality) {
int i;
int dq_uv_ac, dq_uv_dc;
- const int num_segments = enc->config_->segments;
+ const int num_segments = enc->segment_hdr_.num_segments_;
const double amp = SNS_TO_DQ * enc->config_->sns_strength / 100. / 128.;
const double c_base = QualityToCompression(quality);
for (i = 0; i < num_segments; ++i) {
@@ -281,9 +321,11 @@ void VP8SetSegmentParams(VP8Encoder* const enc, float quality) {
enc->dq_uv_dc_ = dq_uv_dc;
enc->dq_uv_ac_ = dq_uv_ac;
- SetupMatrices(enc);
-
SetupFilterStrength(enc); // initialize segments' filtering, eventually
+
+ if (num_segments > 1) SimplifySegments(enc);
+
+ SetupMatrices(enc); // finalize quantization matrices
}
//------------------------------------------------------------------------------
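
SimplifySegments() is a dedup-and-remap pass: segments are scanned in order,
the first representative of each (quant_, fstrength_) pair is kept, a map
from old to surviving index is recorded, and every macroblock's segment id
is then rewritten through that map. The same pattern, reduced to plain
integers for clarity:

    // Collapse duplicate values in place; fill map[] so that map[old] gives
    // the surviving index. Returns the number of surviving entries.
    static int Simplify(int vals[], int n, int map[]) {
      int kept = 1;  // vals[0] always survives
      int s1, s2;
      map[0] = 0;
      for (s1 = 1; s1 < n; ++s1) {
        int found = 0;
        for (s2 = 0; s2 < kept; ++s2) {
          if (vals[s1] == vals[s2]) { found = 1; break; }
        }
        map[s1] = s2;  // either the match, or the next free slot
        if (!found) vals[kept++] = vals[s1];
      }
      return kept;
    }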
diff --git a/src/enc/syntax.c b/src/enc/syntax.c
index 99c21fec..4b20c1aa 100644
--- a/src/enc/syntax.c
+++ b/src/enc/syntax.c
@@ -11,8 +11,9 @@
#include <assert.h>
-#include "./vp8enci.h"
+#include "../utils/utils.h"
#include "webp/format_constants.h"
+#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
@@ -21,18 +22,6 @@ extern "C" {
//------------------------------------------------------------------------------
// Helper functions
-// TODO(later): Move to webp/format_constants.h?
-static void PutLE24(uint8_t* const data, uint32_t val) {
- data[0] = (val >> 0) & 0xff;
- data[1] = (val >> 8) & 0xff;
- data[2] = (val >> 16) & 0xff;
-}
-
-static void PutLE32(uint8_t* const data, uint32_t val) {
- PutLE24(data, val);
- data[3] = (val >> 24) & 0xff;
-}
-
static int IsVP8XNeeded(const VP8Encoder* const enc) {
return !!enc->has_alpha_; // Currently the only case when VP8X is needed.
// This could change in the future.
@@ -73,7 +62,7 @@ static WebPEncodingError PutVP8XHeader(const VP8Encoder* const enc) {
assert(pic->width <= MAX_CANVAS_SIZE && pic->height <= MAX_CANVAS_SIZE);
if (enc->has_alpha_) {
- flags |= ALPHA_FLAG_BIT;
+ flags |= ALPHA_FLAG;
}
PutLE32(vp8x + TAG_SIZE, VP8X_CHUNK_SIZE);
diff --git a/src/enc/vp8enci.h b/src/enc/vp8enci.h
index a0d9001f..0d043390 100644
--- a/src/enc/vp8enci.h
+++ b/src/enc/vp8enci.h
@@ -13,9 +13,9 @@
#define WEBP_ENC_VP8ENCI_H_
#include <string.h> // for memcpy()
+#include "webp/encode.h"
#include "../dsp/dsp.h"
#include "../utils/bit_writer.h"
-#include "webp/encode.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
@@ -27,10 +27,7 @@ extern "C" {
// version numbers
#define ENC_MAJ_VERSION 0
#define ENC_MIN_VERSION 2
-#define ENC_REV_VERSION 0
-
-// size of histogram used by CollectHistogram.
-#define MAX_COEFF_THRESH 64
+#define ENC_REV_VERSION 1
// intra prediction modes
enum { B_DC_PRED = 0, // 4x4 modes
@@ -162,6 +159,14 @@ static WEBP_INLINE int QUANTDIV(int n, int iQ, int B) {
}
extern const uint8_t VP8Zigzag[16];
+// size of histogram used by CollectHistogram.
+#define MAX_COEFF_THRESH 31
+typedef struct VP8Histogram VP8Histogram;
+struct VP8Histogram {
+ // TODO(skal): only max_value and last_non_zero actually need to be stored.
+ int distribution[MAX_COEFF_THRESH + 1];
+};
+
//------------------------------------------------------------------------------
// Headers
@@ -316,40 +321,27 @@ void VP8SetSegment(const VP8EncIterator* const it, int segment);
// WIP: #define USE_TOKEN_BUFFER
-#ifdef USE_TOKEN_BUFFER
-
-#define MAX_NUM_TOKEN 2048
-
-typedef struct VP8Tokens VP8Tokens;
-struct VP8Tokens {
- uint16_t tokens_[MAX_NUM_TOKEN]; // bit#15: bit, bits 0..14: slot
- int left_;
- VP8Tokens* next_;
-};
+typedef struct VP8Tokens VP8Tokens; // struct details in token.c
typedef struct {
- VP8Tokens* rows_;
- uint16_t* tokens_; // set to (*last_)->tokens_
- VP8Tokens** last_;
- int left_;
- int error_; // true in case of malloc error
+ VP8Tokens* pages_; // first page
+ VP8Tokens** last_page_; // last page
+ uint16_t* tokens_; // set to (*last_page_)->tokens_
+ int left_; // how many free token slots are left before the page is full.
+ int error_; // true in case of malloc error
} VP8TBuffer;
void VP8TBufferInit(VP8TBuffer* const b); // initialize an empty buffer
-int VP8TBufferNewPage(VP8TBuffer* const b); // allocate a new page
-void VP8TBufferClear(VP8TBuffer* const b); // de-allocate memory
+
+#ifdef USE_TOKEN_BUFFER
+
+void VP8TBufferClear(VP8TBuffer* const b); // de-allocate pages memory
int VP8EmitTokens(const VP8TBuffer* const b, VP8BitWriter* const bw,
- const uint8_t* const probas);
-
-static WEBP_INLINE int VP8AddToken(VP8TBuffer* const b,
- int bit, int proba_idx) {
- if (b->left_ > 0 || VP8TBufferNewPage(b)) {
- const int slot = --b->left_;
- b->tokens_[slot] = (bit << 15) | proba_idx;
- }
- return bit;
-}
+ const uint8_t* const probas, int final_pass);
+int VP8RecordCoeffTokens(int ctx, int first, int last,
+ const int16_t* const coeffs, VP8TBuffer* tokens);
+void VP8TokenToStats(const VP8TBuffer* const b, proba_t* const stats);
#endif // USE_TOKEN_BUFFER
@@ -379,6 +371,10 @@ struct VP8Encoder {
int percent_; // for progress
+#ifdef USE_TOKEN_BUFFER
+ VP8TBuffer tokens_; // token buffer
+#endif
+
// transparency blob
int has_alpha_;
uint8_t* alpha_data_; // non-NULL if transparency is present
@@ -455,6 +451,11 @@ void VP8EncFreeBitWriters(VP8Encoder* const enc);
// in frame.c
extern const uint8_t VP8EncBands[16 + 1];
+extern const uint8_t VP8Cat3[];
+extern const uint8_t VP8Cat4[];
+extern const uint8_t VP8Cat5[];
+extern const uint8_t VP8Cat6[];
+
// Form all the four Intra16x16 predictions in the yuv_p_ cache
void VP8MakeLuma16Preds(const VP8EncIterator* const it);
// Form all the four Chroma8x8 predictions in the yuv_p_ cache
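
The token buffer is now a linked list of fixed-size pages whose layout is
private to token.c; the public VP8TBuffer only keeps a write cursor
(tokens_/left_) and the ends of the page chain (pages_/last_page_). A minimal
sketch of the paged-append idea (PAGE_SIZE and the Page struct are
illustrative, not the real token.c layout; note that slots are filled from the
end of the page, as the removed VP8AddToken() did):

    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SIZE 2048  // illustrative

    typedef struct Page Page;
    struct Page {
      uint16_t tokens[PAGE_SIZE];  // bit #15: bit value, bits #0..14: proba slot
      Page* next;
    };

    typedef struct {
      Page* pages;        // first page
      Page** last_page;   // where the next page gets chained
      uint16_t* tokens;   // current page's token array
      int left;           // free slots left in the current page
      int error;          // sticky malloc-failure flag
    } TBuffer;

    static void TBufferInit(TBuffer* const b) {
      b->pages = NULL;
      b->last_page = &b->pages;
      b->tokens = NULL;
      b->left = 0;
      b->error = 0;
    }

    static int AddToken(TBuffer* const b, int bit, int proba_idx) {
      if (b->left == 0) {  // current page is full: chain a new one
        Page* const page = b->error ? NULL : (Page*)malloc(sizeof(*page));
        if (page == NULL) {
          b->error = 1;    // remember the failure, but still return the bit
          return bit;
        }
        page->next = NULL;
        *b->last_page = page;
        b->last_page = &page->next;
        b->tokens = page->tokens;
        b->left = PAGE_SIZE;
      }
      b->tokens[--b->left] = (uint16_t)((bit << 15) | proba_idx);
      return bit;
    }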
diff --git a/src/enc/vp8l.c b/src/enc/vp8l.c
index 41aa62b7..2798d670 100644
--- a/src/enc/vp8l.c
+++ b/src/enc/vp8l.c
@@ -37,7 +37,8 @@ extern "C" {
static int CompareColors(const void* p1, const void* p2) {
const uint32_t a = *(const uint32_t*)p1;
const uint32_t b = *(const uint32_t*)p2;
- return (a < b) ? -1 : (a > b) ? 1 : 0;
+ assert(a != b);
+ return (a < b) ? -1 : 1;
}
// If number of colors in the image is less than or equal to MAX_PALETTE_SIZE,
@@ -220,7 +221,7 @@ static int GetHuffBitLengthsAndCodes(
}
// Create Huffman trees.
- for (i = 0; i < histogram_image_size; ++i) {
+ for (i = 0; ok && (i < histogram_image_size); ++i) {
HuffmanTreeCode* const codes = &huffman_codes[5 * i];
VP8LHistogram* const histo = histogram_image->histograms[i];
ok = ok && VP8LCreateHuffmanTree(histo->literal_, 15, codes + 0);
@@ -231,7 +232,11 @@ static int GetHuffBitLengthsAndCodes(
}
End:
- if (!ok) free(mem_buf);
+ if (!ok) {
+ free(mem_buf);
+ // If any VP8LCreateHuffmanTree() call above fails, clean up after it.
+ memset(huffman_codes, 0, 5 * histogram_image_size * sizeof(*huffman_codes));
+ }
return ok;
}
@@ -406,9 +411,10 @@ static int StoreHuffmanCode(VP8LBitWriter* const bw,
}
static void WriteHuffmanCode(VP8LBitWriter* const bw,
- const HuffmanTreeCode* const code, int index) {
- const int depth = code->code_lengths[index];
- const int symbol = code->codes[index];
+ const HuffmanTreeCode* const code,
+ int code_index) {
+ const int depth = code->code_lengths[code_index];
+ const int symbol = code->codes[code_index];
VP8LWriteBits(bw, depth, symbol);
}
@@ -529,7 +535,12 @@ static int EncodeImageInternal(VP8LBitWriter* const bw,
sizeof(*histogram_symbols));
assert(histogram_bits >= MIN_HUFFMAN_BITS);
assert(histogram_bits <= MAX_HUFFMAN_BITS);
- if (histogram_image == NULL || histogram_symbols == NULL) goto Error;
+
+ if (histogram_image == NULL || histogram_symbols == NULL) {
+ free(histogram_image);
+ free(histogram_symbols);
+ return 0;
+ }
// Calculate backward references from ARGB image.
if (!VP8LGetBackwardReferences(width, height, argb, quality, cache_bits,
@@ -571,10 +582,10 @@ static int EncodeImageInternal(VP8LBitWriter* const bw,
uint32_t i;
if (histogram_argb == NULL) goto Error;
for (i = 0; i < histogram_image_xysize; ++i) {
- const int index = histogram_symbols[i] & 0xffff;
- histogram_argb[i] = 0xff000000 | (index << 8);
- if (index >= max_index) {
- max_index = index + 1;
+ const int symbol_index = histogram_symbols[i] & 0xffff;
+ histogram_argb[i] = 0xff000000 | (symbol_index << 8);
+ if (symbol_index >= max_index) {
+ max_index = symbol_index + 1;
}
}
histogram_image_size = max_index;
@@ -706,13 +717,6 @@ static int ApplyCrossColorFilter(const VP8LEncoder* const enc,
// -----------------------------------------------------------------------------
-static void PutLE32(uint8_t* const data, uint32_t val) {
- data[0] = (val >> 0) & 0xff;
- data[1] = (val >> 8) & 0xff;
- data[2] = (val >> 16) & 0xff;
- data[3] = (val >> 24) & 0xff;
-}
-
static WebPEncodingError WriteRiffHeader(const WebPPicture* const pic,
size_t riff_size, size_t vp8l_size) {
uint8_t riff[RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE + VP8L_SIGNATURE_SIZE] = {
@@ -807,30 +811,24 @@ static WebPEncodingError AllocateTransformBuffer(VP8LEncoder* const enc,
return err;
}
-// Bundles multiple (2, 4 or 8) pixels into a single pixel.
-// Returns the new xsize.
-static void BundleColorMap(const WebPPicture* const pic,
- int xbits, uint32_t* bundled_argb, int xs) {
- int y;
- const int bit_depth = 1 << (3 - xbits);
- uint32_t code = 0;
- const uint32_t* argb = pic->argb;
- const int width = pic->width;
- const int height = pic->height;
-
- for (y = 0; y < height; ++y) {
- int x;
+// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel.
+static void BundleColorMap(const uint8_t* const row, int width,
+ int xbits, uint32_t* const dst) {
+ int x;
+ if (xbits > 0) {
+ const int bit_depth = 1 << (3 - xbits);
+ const int mask = (1 << xbits) - 1;
+ uint32_t code = 0xff000000;
for (x = 0; x < width; ++x) {
- const int mask = (1 << xbits) - 1;
const int xsub = x & mask;
if (xsub == 0) {
- code = 0;
+ code = 0xff000000;
}
- // TODO(vikasa): simplify the bundling logic.
- code |= (argb[x] & 0xff00) << (bit_depth * xsub);
- bundled_argb[y * xs + (x >> xbits)] = 0xff000000 | code;
+ code |= row[x] << (8 + bit_depth * xsub);
+ dst[x >> xbits] = code;
}
- argb += pic->argb_stride;
+ } else {
+ for (x = 0; x < width; ++x) dst[x] = 0xff000000 | (row[x] << 8);
}
}
@@ -842,24 +840,43 @@ static WebPEncodingError ApplyPalette(VP8LBitWriter* const bw,
WebPEncodingError err = VP8_ENC_OK;
int i, x, y;
const WebPPicture* const pic = enc->pic_;
- uint32_t* argb = pic->argb;
+ uint32_t* src = pic->argb;
+ uint32_t* dst;
const int width = pic->width;
const int height = pic->height;
uint32_t* const palette = enc->palette_;
const int palette_size = enc->palette_size_;
+ uint8_t* row = NULL;
+ int xbits;
// Replace each input pixel by corresponding palette index.
+ // This is done line by line.
+ if (palette_size <= 4) {
+ xbits = (palette_size <= 2) ? 3 : 2;
+ } else {
+ xbits = (palette_size <= 16) ? 1 : 0;
+ }
+
+ err = AllocateTransformBuffer(enc, VP8LSubSampleSize(width, xbits), height);
+ if (err != VP8_ENC_OK) goto Error;
+ dst = enc->argb_;
+
+ row = WebPSafeMalloc((uint64_t)width, sizeof(*row));
+ if (row == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY;
+
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
- const uint32_t pix = argb[x];
+ const uint32_t pix = src[x];
for (i = 0; i < palette_size; ++i) {
if (pix == palette[i]) {
- argb[x] = 0xff000000u | (i << 8);
+ row[x] = i;
break;
}
}
}
- argb += pic->argb_stride;
+ BundleColorMap(row, width, xbits, dst);
+ src += pic->argb_stride;
+ dst += enc->current_width_;
}
// Save palette to bitstream.
@@ -875,20 +892,8 @@ static WebPEncodingError ApplyPalette(VP8LBitWriter* const bw,
goto Error;
}
- if (palette_size <= 16) {
- // Image can be packed (multiple pixels per uint32_t).
- int xbits = 1;
- if (palette_size <= 2) {
- xbits = 3;
- } else if (palette_size <= 4) {
- xbits = 2;
- }
- err = AllocateTransformBuffer(enc, VP8LSubSampleSize(width, xbits), height);
- if (err != VP8_ENC_OK) goto Error;
- BundleColorMap(pic, xbits, enc->argb_, enc->current_width_);
- }
-
Error:
+ free(row);
return err;
}
@@ -898,13 +903,13 @@ static int GetHistoBits(const WebPConfig* const config,
const WebPPicture* const pic) {
const int width = pic->width;
const int height = pic->height;
- const size_t hist_size = sizeof(VP8LHistogram);
+ const uint64_t hist_size = sizeof(VP8LHistogram);
// Make tile size a function of encoding method (Range: 0 to 6).
int histo_bits = 7 - config->method;
while (1) {
- const size_t huff_image_size = VP8LSubSampleSize(width, histo_bits) *
- VP8LSubSampleSize(height, histo_bits) *
- hist_size;
+ const uint64_t huff_image_size = VP8LSubSampleSize(width, histo_bits) *
+ VP8LSubSampleSize(height, histo_bits) *
+ hist_size;
if (huff_image_size <= MAX_HUFF_IMAGE_SIZE) break;
++histo_bits;
}
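
BundleColorMap() packs 1 << xbits palette indices into each output pixel,
each index taking 8 >> xbits bits within the green-channel group. With
xbits == 1 (palette sizes 5..16), two 4-bit indices share one pixel. A worked
sketch of one row under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    // Pack 'width' 4-bit palette indices, two per ARGB pixel (xbits == 1).
    static void Bundle4Bit(const uint8_t* const row, int width,
                           uint32_t* const dst) {
      const int xbits = 1;
      const int bit_depth = 1 << (3 - xbits);  // 4 bits per index
      const int mask = (1 << xbits) - 1;
      uint32_t code = 0xff000000u;
      int x;
      for (x = 0; x < width; ++x) {
        const int xsub = x & mask;
        if (xsub == 0) code = 0xff000000u;     // start a fresh output pixel
        code |= (uint32_t)row[x] << (8 + bit_depth * xsub);
        dst[x >> xbits] = code;
      }
    }

    int main(void) {
      const uint8_t row[4] = { 0x1, 0x2, 0x3, 0x4 };
      uint32_t dst[2];
      Bundle4Bit(row, 4, dst);
      printf("%08x %08x\n", dst[0], dst[1]);   // ff002100 ff004300
      return 0;
    }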
diff --git a/src/utils/bit_reader.c b/src/utils/bit_reader.c
index 1afb1db8..73eaafcb 100644
--- a/src/utils/bit_reader.c
+++ b/src/utils/bit_reader.c
@@ -169,7 +169,7 @@ void VP8LFillBitWindow(VP8LBitReader* const br) {
}
uint32_t VP8LReadOneBit(VP8LBitReader* const br) {
- const uint32_t val = (br->val_ >> br->bit_pos_) & 1;
+ const uint32_t val = (uint32_t)((br->val_ >> br->bit_pos_) & 1);
// Flag an error at end_of_stream.
if (!br->eos_) {
++br->bit_pos_;
@@ -198,7 +198,7 @@ uint32_t VP8LReadBits(VP8LBitReader* const br, int n_bits) {
if ((br->bit_pos_ + n_bits) > 64) return val;
}
}
- val = (br->val_ >> br->bit_pos_) & kBitMask[n_bits];
+ val = (uint32_t)((br->val_ >> br->bit_pos_) & kBitMask[n_bits]);
br->bit_pos_ += n_bits;
if (br->bit_pos_ >= 40) {
if (br->pos_ + 5 < br->len_) {
diff --git a/src/utils/bit_reader.h b/src/utils/bit_reader.h
index 11a40a55..a9c32436 100644
--- a/src/utils/bit_reader.h
+++ b/src/utils/bit_reader.h
@@ -24,11 +24,28 @@
extern "C" {
#endif
-#define BITS 32 // can be 32, 16 or 8
+//------------------------------------------------------------------------------
+// BITS can be either 32, 24, 16 or 8.
+// Pick values that fit natural register size.
+
+#if defined(__i386__) || defined(_M_IX86) // x86 32bit
+#define BITS 16
+#elif defined(__arm__) || defined(_M_ARM) // ARM
+#define BITS 8
+#else // reasonable default
+#define BITS 32
+#endif
+
+//------------------------------------------------------------------------------
+// Derived types and constants
+
#define MASK ((((bit_t)1) << (BITS)) - 1)
#if (BITS == 32)
typedef uint64_t bit_t; // natural register type
typedef uint32_t lbit_t; // natural type for memory I/O
+#elif (BITS == 24)
+typedef uint32_t bit_t;
+typedef uint32_t lbit_t;
#elif (BITS == 16)
typedef uint32_t bit_t;
typedef uint16_t lbit_t;
@@ -38,7 +55,7 @@ typedef uint8_t lbit_t;
#endif
//------------------------------------------------------------------------------
-// Bitreader and code-tree reader
+// Bitreader
typedef struct VP8BitReader VP8BitReader;
struct VP8BitReader {
@@ -80,21 +97,26 @@ static WEBP_INLINE void VP8LoadNewBytes(VP8BitReader* const br) {
lbit_t in_bits = *(lbit_t*)br->buf_;
br->buf_ += (BITS) >> 3;
#if !defined(__BIG_ENDIAN__)
-#if (BITS == 32)
+#if (BITS == 32) || (BITS == 24)
#if defined(__i386__) || defined(__x86_64__)
__asm__ volatile("bswap %k0" : "=r"(in_bits) : "0"(in_bits));
- bits = (bit_t)in_bits; // 32b -> 64b zero-extension
+ bits = (bit_t)in_bits; // 24b/32b -> 32b/64b zero-extension
#elif defined(_MSC_VER)
bits = _byteswap_ulong(in_bits);
#else
bits = (bit_t)(in_bits >> 24) | ((in_bits >> 8) & 0xff00)
| ((in_bits << 8) & 0xff0000) | (in_bits << 24);
#endif // x86
+#if (BITS == 24)
+ bits >>= 8;
+#endif
#elif (BITS == 16)
// gcc will recognize a 'rorw $8, ...' here:
bits = (bit_t)(in_bits >> 8) | ((in_bits & 0xff) << 8);
+#else // BITS == 8
+ bits = (bit_t)in_bits;
#endif
-#else // LITTLE_ENDIAN
+#else // BIG_ENDIAN
bits = (bit_t)in_bits;
#endif
br->value_ |= bits << br->missing_;
@@ -121,7 +143,7 @@ static WEBP_INLINE int VP8BitUpdate(VP8BitReader* const br, bit_t split) {
static WEBP_INLINE void VP8Shift(VP8BitReader* const br) {
// range_ is in [0..127] interval here.
- const int idx = br->range_ >> (BITS);
+ const bit_t idx = br->range_ >> (BITS);
const int shift = kVP8Log2Range[idx];
br->range_ = kVP8NewRange[idx];
br->value_ <<= shift;
@@ -149,7 +171,7 @@ static WEBP_INLINE int VP8GetSigned(VP8BitReader* const br, int v) {
// -----------------------------------------------------------------------------
-// Bitreader
+// Bitreader for lossless format
typedef struct {
uint64_t val_;
@@ -182,7 +204,7 @@ uint32_t VP8LReadOneBit(VP8LBitReader* const br);
// 32 times after the last VP8LFillBitWindow. Any subsequent calls
// (without VP8LFillBitWindow) will return invalid data.
static WEBP_INLINE uint32_t VP8LReadOneBitUnsafe(VP8LBitReader* const br) {
- const uint32_t val = (br->val_ >> br->bit_pos_) & 1;
+ const uint32_t val = (uint32_t)((br->val_ >> br->bit_pos_) & 1);
++br->bit_pos_;
return val;
}
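
With BITS == 24 the reader still performs a 32-bit load, byte-swaps it, then
drops the low byte so that exactly 24 fresh bits enter the window. A portable
sketch of that load on a little-endian machine (memcpy() stands in for the
unaligned pointer cast used above; no asm, same result as the bswap path):

    #include <stdint.h>
    #include <string.h>

    // Load 24 bits, most-significant byte first, from buf[0..2].
    static uint32_t Load24(const uint8_t* const buf) {
      uint32_t in_bits, bits;
      memcpy(&in_bits, buf, sizeof(in_bits));  // raw (unaligned-safe) 32-bit load
      bits = (in_bits >> 24) | ((in_bits >> 8) & 0xff00)     // portable bswap
           | ((in_bits << 8) & 0xff0000) | (in_bits << 24);
      return bits >> 8;  // keep the top 24 bits
    }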
diff --git a/src/utils/huffman_encode.c b/src/utils/huffman_encode.c
index 2686c665..49187592 100644
--- a/src/utils/huffman_encode.c
+++ b/src/utils/huffman_encode.c
@@ -138,13 +138,8 @@ static int CompareHuffmanTrees(const void* ptr1, const void* ptr2) {
} else if (t1->total_count_ < t2->total_count_) {
return 1;
} else {
- if (t1->value_ < t2->value_) {
- return -1;
- }
- if (t1->value_ > t2->value_) {
- return 1;
- }
- return 0;
+ assert(t1->value_ != t2->value_);
+ return (t1->value_ < t2->value_) ? -1 : 1;
}
}
@@ -193,6 +188,10 @@ static int GenerateOptimalTree(const int* const histogram, int histogram_size,
}
}
+ if (tree_size_orig == 0) { // pretty optimal already!
+ return 1;
+ }
+
// 3 * tree_size is enough to cover all the nodes representing a
// population and all the inserted nodes combining two existing nodes.
// The tree pool needs 2 * (tree_size_orig - 1) entities, and the
@@ -234,7 +233,7 @@ static int GenerateOptimalTree(const int* const histogram, int histogram_size,
tree_pool[tree_pool_size++] = tree[tree_size - 1];
tree_pool[tree_pool_size++] = tree[tree_size - 2];
count = tree_pool[tree_pool_size - 1].total_count_ +
- tree_pool[tree_pool_size - 2].total_count_;
+ tree_pool[tree_pool_size - 2].total_count_;
tree_size -= 2;
{
// Search for the insertion point.
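
CompareHuffmanTrees() now asserts that the tied-on values are unique and
breaks count ties by value, which makes the qsort() ordering fully
deterministic (qsort() itself is not a stable sort). The comparator pattern,
reduced:

    typedef struct { int count; int value; } Node;

    // Sort by decreasing count; break ties by increasing (unique) value so
    // that equal-count nodes always land in the same order.
    // Usage: qsort(nodes, n, sizeof(nodes[0]), CmpNodes);
    static int CmpNodes(const void* p1, const void* p2) {
      const Node* const a = (const Node*)p1;
      const Node* const b = (const Node*)p2;
      if (a->count > b->count) return -1;
      if (a->count < b->count) return 1;
      return (a->value < b->value) ? -1 : 1;
    }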
diff --git a/src/utils/utils.c b/src/utils/utils.c
index 673b7e28..b1db2f9d 100644
--- a/src/utils/utils.c
+++ b/src/utils/utils.c
@@ -19,7 +19,8 @@ extern "C" {
//------------------------------------------------------------------------------
// Checked memory allocation
-static int CheckSizeArguments(uint64_t nmemb, size_t size) {
+// Returns 0 in case of overflow of nmemb * size.
+static int CheckSizeArgumentsOverflow(uint64_t nmemb, size_t size) {
const uint64_t total_size = nmemb * size;
if (nmemb == 0) return 1;
if ((uint64_t)size > WEBP_MAX_ALLOCABLE_MEMORY / nmemb) return 0;
@@ -28,12 +29,14 @@ static int CheckSizeArguments(uint64_t nmemb, size_t size) {
}
void* WebPSafeMalloc(uint64_t nmemb, size_t size) {
- if (!CheckSizeArguments(nmemb, size)) return NULL;
+ if (!CheckSizeArgumentsOverflow(nmemb, size)) return NULL;
+ assert(nmemb * size > 0);
return malloc((size_t)(nmemb * size));
}
void* WebPSafeCalloc(uint64_t nmemb, size_t size) {
- if (!CheckSizeArguments(nmemb, size)) return NULL;
+ if (!CheckSizeArgumentsOverflow(nmemb, size)) return NULL;
+ assert(nmemb * size > 0);
return calloc((size_t)nmemb, size);
}
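
CheckSizeArgumentsOverflow() rejects an nmemb * size product that would wrap
or exceed the allocation cap, and it tests by division so the product is
never computed unchecked. A reduced sketch of the guard (MAX_ALLOC is an
illustrative cap standing in for WEBP_MAX_ALLOCABLE_MEMORY):

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_ALLOC (1ull << 40)  // illustrative cap

    // Returns 1 if nmemb * size can be computed and allocated safely.
    static int SizeOverflowCheck(uint64_t nmemb, size_t size) {
      if (nmemb == 0) return 1;                           // trivially safe
      if ((uint64_t)size > MAX_ALLOC / nmemb) return 0;   // would exceed the cap
      return 1;
    }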
diff --git a/src/utils/utils.h b/src/utils/utils.h
index aa445695..32dfb8a9 100644
--- a/src/utils/utils.h
+++ b/src/utils/utils.h
@@ -7,11 +7,14 @@
//
// Misc. common utility functions
//
-// Author: Skal (pascal.massimino@gmail.com)
+// Authors: Skal (pascal.massimino@gmail.com)
+// Urvang (urvang@google.com)
#ifndef WEBP_UTILS_UTILS_H_
#define WEBP_UTILS_UTILS_H_
+#include <assert.h>
+
#include "webp/types.h"
#if defined(__cplusplus) || defined(c_plusplus)
@@ -36,6 +39,40 @@ void* WebPSafeMalloc(uint64_t nmemb, size_t size);
void* WebPSafeCalloc(uint64_t nmemb, size_t size);
//------------------------------------------------------------------------------
+// Reading/writing data.
+
+// Read 16, 24 or 32 bits stored in little-endian order.
+static WEBP_INLINE int GetLE16(const uint8_t* const data) {
+ return (int)(data[0] << 0) | (data[1] << 8);
+}
+
+static WEBP_INLINE int GetLE24(const uint8_t* const data) {
+ return GetLE16(data) | (data[2] << 16);
+}
+
+static WEBP_INLINE uint32_t GetLE32(const uint8_t* const data) {
+ return (uint32_t)GetLE16(data) | (GetLE16(data + 2) << 16);
+}
+
+// Store 16, 24 or 32 bits in little-endian order.
+static WEBP_INLINE void PutLE16(uint8_t* const data, int val) {
+ assert(val < (1 << 16));
+ data[0] = (val >> 0);
+ data[1] = (val >> 8);
+}
+
+static WEBP_INLINE void PutLE24(uint8_t* const data, int val) {
+ assert(val < (1 << 24));
+ PutLE16(data, val & 0xffff);
+ data[2] = (val >> 16);
+}
+
+static WEBP_INLINE void PutLE32(uint8_t* const data, uint32_t val) {
+ PutLE16(data, (int)(val & 0xffff));
+ PutLE16(data + 2, (int)(val >> 16));
+}
+
+//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
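
The new GetLE*/PutLE* inline helpers give the byte-order code previously
duplicated in syntax.c and vp8l.c a single, endianness-independent home. A
quick round-trip check (the include path is illustrative):

    #include <stdio.h>
    #include "src/utils/utils.h"

    int main(void) {
      uint8_t buf[4];
      PutLE32(buf, 0x12345678u);
      printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
      // -> 78 56 34 12: least-significant byte first
      printf("%08x\n", (unsigned)GetLE32(buf));  // -> 12345678
      return 0;
    }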