author     Vikas Arora <vikasa@google.com>    2011-06-16 15:56:45 +0530
committer  Vikas Arora <vikasa@google.com>    2011-06-16 15:56:45 +0530
commit     7c970a0a679089e416c5887cf7fcece15a70bfa4 (patch)
tree       ae122b9f1d2fc0aa9d1c36974e37cc80cd1d3349 /src
parent     03d5e34c70f174c16282b0efdc6bb9473df5f8f1 (diff)
download   webp-7c970a0a679089e416c5887cf7fcece15a70bfa4.tar.gz
Add WebP Encoder code to the /external/webp code-repo.

With this change, in addition to the libwebp-decode static lib, a
libwebp-encode static library corresponding to the WebP encoder will be
built & installed.

Change-Id: I809a427a6ad849ba7d16f56dd0b0bc5cba4768ec
Diffstat (limited to 'src')
-rw-r--r--  src/Android.mk            1
-rw-r--r--  src/dec/Android.mk       37
-rw-r--r--  src/dec/Makefile.am       9
-rw-r--r--  src/dec/Makefile.in     503
-rw-r--r--  src/enc/Android.mk       41
-rw-r--r--  src/enc/analysis.c      399
-rw-r--r--  src/enc/bit_writer.c    175
-rw-r--r--  src/enc/bit_writer.h     61
-rw-r--r--  src/enc/config.c        115
-rw-r--r--  src/enc/cost.c          491
-rw-r--r--  src/enc/cost.h           52
-rw-r--r--  src/enc/dsp.c           647
-rw-r--r--  src/enc/filter.c        378
-rw-r--r--  src/enc/frame.c         695
-rw-r--r--  src/enc/iterator.c      406
-rw-r--r--  src/enc/picture.c       316
-rw-r--r--  src/enc/quant.c         923
-rw-r--r--  src/enc/syntax.c        237
-rw-r--r--  src/enc/tree.c          507
-rw-r--r--  src/enc/vp8enci.h       471
-rw-r--r--  src/enc/webpenc.c       317
21 files changed, 6269 insertions, 512 deletions
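
The net effect of the change is that src/enc/ now builds into a second static
library next to the decoder. As a minimal sketch of what that library is for,
here is an illustrative caller, assuming the WebPEncodeRGB() entry point
declared in webp/encode.h under the include/ directory referenced by the
makefiles below; the sketch is not part of this patch and the exact signature
is an assumption:

    /* Hypothetical caller of the new libwebp-encode static library. */
    #include <stdio.h>
    #include <stdlib.h>
    #include "webp/encode.h"   /* assumed to declare WebPEncodeRGB() */

    int main(void) {
      const int width = 64, height = 64, stride = 64 * 3;
      uint8_t* const rgb = (uint8_t*)calloc((size_t)stride * height, 1);
      uint8_t* output = NULL;
      size_t size;
      if (rgb == NULL) return 1;
      /* quality factor is in [0..100], the same range WebPValidateConfig()
         enforces in src/enc/config.c below. */
      size = WebPEncodeRGB(rgb, width, height, stride, 75.f, &output);
      if (size > 0) {
        printf("encoded %lu bytes\n", (unsigned long)size);
      }
      free(output);   /* output buffer is allocated by the encoder */
      free(rgb);
      return 0;
    }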
diff --git a/src/Android.mk b/src/Android.mk
new file mode 100644
index 00000000..5053e7d6
--- /dev/null
+++ b/src/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/src/dec/Android.mk b/src/dec/Android.mk
new file mode 100644
index 00000000..640a3dad
--- /dev/null
+++ b/src/dec/Android.mk
@@ -0,0 +1,37 @@
+# Copyright 2010 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := \
+ bits.c \
+ dsp.c \
+ frame.c \
+ idec.c \
+ quant.c \
+ tree.c \
+ vp8.c \
+ webp.c \
+ yuv.c
+
+LOCAL_CFLAGS := -DANDROID
+
+LOCAL_C_INCLUDES += \
+ $(LOCAL_PATH) \
+ $(LOCAL_PATH)/../../include
+
+LOCAL_MODULE:= libwebp-decode
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/src/dec/Makefile.am b/src/dec/Makefile.am
deleted file mode 100644
index 9aef27bb..00000000
--- a/src/dec/Makefile.am
+++ /dev/null
@@ -1,9 +0,0 @@
-AM_CPPFLAGS = -I$(top_buildir)/src
-lib_LTLIBRARIES = libwebpdecode.la
-
-libwebpdecode_la_SOURCES = bits.h vp8i.h yuv.h bits.c dsp.c frame.c \
- quant.c tree.c vp8.c webp.c yuv.c idec.c
-libwebpdecode_la_LDFLAGS = -version-info 0:0:0
-libwebpdecodeinclude_HEADERS = ../webp/decode.h ../webp/decode_vp8.h ../webp/types.h
-libwebpdecodeincludedir = $(includedir)/webp
-noinst_HEADERS = bits.h vp8i.h webpi.h yuv.h
diff --git a/src/dec/Makefile.in b/src/dec/Makefile.in
deleted file mode 100644
index e078bdfc..00000000
--- a/src/dec/Makefile.in
+++ /dev/null
@@ -1,503 +0,0 @@
-# Makefile.in generated by automake 1.10.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-subdir = src
-DIST_COMMON = $(libwebpdecodeinclude_HEADERS) $(noinst_HEADERS) \
- $(srcdir)/Makefile.am $(srcdir)/Makefile.in
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/configure.ac
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/config.h
-CONFIG_CLEAN_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
- $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
- *) f=$$p;; \
- esac;
-am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
-am__installdirs = "$(DESTDIR)$(libdir)" \
- "$(DESTDIR)$(libwebpdecodeincludedir)"
-libLTLIBRARIES_INSTALL = $(INSTALL)
-LTLIBRARIES = $(lib_LTLIBRARIES)
-libwebpdecode_la_LIBADD =
-am_libwebpdecode_la_OBJECTS = bits.lo dsp.lo frame.lo quant.lo tree.lo \
- vp8.lo webp.lo yuv.lo
-libwebpdecode_la_OBJECTS = $(am_libwebpdecode_la_OBJECTS)
-libwebpdecode_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
- $(libwebpdecode_la_LDFLAGS) $(LDFLAGS) -o $@
-DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
-depcomp = $(SHELL) $(top_srcdir)/depcomp
-am__depfiles_maybe = depfiles
-COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
- $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
-LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
- $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
-CCLD = $(CC)
-LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
- --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
-SOURCES = $(libwebpdecode_la_SOURCES)
-DIST_SOURCES = $(libwebpdecode_la_SOURCES)
-libwebpdecodeincludeHEADERS_INSTALL = $(INSTALL_HEADER)
-HEADERS = $(libwebpdecodeinclude_HEADERS) $(noinst_HEADERS)
-ETAGS = etags
-CTAGS = ctags
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMTAR = @AMTAR@
-AR = @AR@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CFLAGS = @CFLAGS@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-DSYMUTIL = @DSYMUTIL@
-ECHO = @ECHO@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-F77 = @F77@
-FFLAGS = @FFLAGS@
-GREP = @GREP@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-LDFLAGS = @LDFLAGS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBTOOL = @LIBTOOL@
-LN_S = @LN_S@
-LTLIBOBJS = @LTLIBOBJS@
-MAKEINFO = @MAKEINFO@
-MKDIR_P = @MKDIR_P@
-NMEDIT = @NMEDIT@
-OBJEXT = @OBJEXT@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-RANLIB = @RANLIB@
-SED = @SED@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-STRIP = @STRIP@
-VERSION = @VERSION@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_F77 = @ac_ct_F77@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-builddir = @builddir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-docdir = @docdir@
-dvidir = @dvidir@
-exec_prefix = @exec_prefix@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-AM_CPPFLAGS = -I$(top_buildir)/src
-lib_LTLIBRARIES = libwebpdecode.la
-libwebpdecode_la_SOURCES = bits.h vp8i.h yuv.h bits.c dsp.c frame.c quant.c tree.c vp8.c webp.c yuv.c idec.c
-libwebpdecode_la_LDFLAGS = -version-info 0:0:0
-libwebpdecodeinclude_HEADERS = ../webp/decode.h ../webp/decode_vp8.h ../webp/types.h
-libwebpdecodeincludedir = $(includedir)/webp
-noinst_HEADERS = bits.h vp8i.h webpi.h yuv.h
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .c .lo .o .obj
-$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
- && exit 0; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/Makefile'; \
- cd $(top_srcdir) && \
- $(AUTOMAKE) --gnu src/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-install-libLTLIBRARIES: $(lib_LTLIBRARIES)
- @$(NORMAL_INSTALL)
- test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)"
- @list='$(lib_LTLIBRARIES)'; for p in $$list; do \
- if test -f $$p; then \
- f=$(am__strip_dir) \
- echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \
- $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(libdir)/$$f"; \
- else :; fi; \
- done
-
-uninstall-libLTLIBRARIES:
- @$(NORMAL_UNINSTALL)
- @list='$(lib_LTLIBRARIES)'; for p in $$list; do \
- p=$(am__strip_dir) \
- echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$p'"; \
- $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$p"; \
- done
-
-clean-libLTLIBRARIES:
- -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
- @list='$(lib_LTLIBRARIES)'; for p in $$list; do \
- dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
- test "$$dir" != "$$p" || dir=.; \
- echo "rm -f \"$${dir}/so_locations\""; \
- rm -f "$${dir}/so_locations"; \
- done
-libwebpdecode.la: $(libwebpdecode_la_OBJECTS) $(libwebpdecode_la_DEPENDENCIES)
- $(libwebpdecode_la_LINK) -rpath $(libdir) $(libwebpdecode_la_OBJECTS) $(libwebpdecode_la_LIBADD) $(LIBS)
-
-mostlyclean-compile:
- -rm -f *.$(OBJEXT)
-
-distclean-compile:
- -rm -f *.tab.c
-
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bits.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dsp.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/frame.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/quant.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tree.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/vp8.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/webp.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/yuv.Plo@am__quote@
-
-.c.o:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c $<
-
-.c.obj:
-@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'`
-
-.c.lo:
-@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
-
-mostlyclean-libtool:
- -rm -f *.lo
-
-clean-libtool:
- -rm -rf .libs _libs
-install-libwebpdecodeincludeHEADERS: $(libwebpdecodeinclude_HEADERS)
- @$(NORMAL_INSTALL)
- test -z "$(libwebpdecodeincludedir)" || $(MKDIR_P) "$(DESTDIR)$(libwebpdecodeincludedir)"
- @list='$(libwebpdecodeinclude_HEADERS)'; for p in $$list; do \
- if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
- f=$(am__strip_dir) \
- echo " $(libwebpdecodeincludeHEADERS_INSTALL) '$$d$$p' '$(DESTDIR)$(libwebpdecodeincludedir)/$$f'"; \
- $(libwebpdecodeincludeHEADERS_INSTALL) "$$d$$p" "$(DESTDIR)$(libwebpdecodeincludedir)/$$f"; \
- done
-
-uninstall-libwebpdecodeincludeHEADERS:
- @$(NORMAL_UNINSTALL)
- @list='$(libwebpdecodeinclude_HEADERS)'; for p in $$list; do \
- f=$(am__strip_dir) \
- echo " rm -f '$(DESTDIR)$(libwebpdecodeincludedir)/$$f'"; \
- rm -f "$(DESTDIR)$(libwebpdecodeincludedir)/$$f"; \
- done
-
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- tags=; \
- here=`pwd`; \
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
- test -n "$$unique" || unique=$$empty_fix; \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- $$tags $$unique; \
- fi
-ctags: CTAGS
-CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- tags=; \
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- test -z "$(CTAGS_ARGS)$$tags$$unique" \
- || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
- $$tags $$unique
-
-GTAGS:
- here=`$(am__cd) $(top_builddir) && pwd` \
- && cd $(top_srcdir) \
- && gtags -i $(GTAGS_ARGS) $$here
-
-distclean-tags:
- -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
- fi; \
- cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
- else \
- test -f $(distdir)/$$file \
- || cp -p $$d/$$file $(distdir)/$$file \
- || exit 1; \
- fi; \
- done
-check-am: all-am
-check: check-am
-all-am: Makefile $(LTLIBRARIES) $(HEADERS)
-installdirs:
- for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(libwebpdecodeincludedir)"; do \
- test -z "$$dir" || $(MKDIR_P) "$$dir"; \
- done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- `test -z '$(STRIP)' || \
- echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
-
-clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \
- mostlyclean-am
-
-distclean: distclean-am
- -rm -rf ./$(DEPDIR)
- -rm -f Makefile
-distclean-am: clean-am distclean-compile distclean-generic \
- distclean-tags
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-info: info-am
-
-info-am:
-
-install-data-am: install-libwebpdecodeincludeHEADERS
-
-install-dvi: install-dvi-am
-
-install-exec-am: install-libLTLIBRARIES
-
-install-html: install-html-am
-
-install-info: install-info-am
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-ps: install-ps-am
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -rf ./$(DEPDIR)
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-compile mostlyclean-generic \
- mostlyclean-libtool
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-libLTLIBRARIES \
- uninstall-libwebpdecodeincludeHEADERS
-
-.MAKE: install-am install-strip
-
-.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
- clean-libLTLIBRARIES clean-libtool ctags distclean \
- distclean-compile distclean-generic distclean-libtool \
- distclean-tags distdir dvi dvi-am html html-am info info-am \
- install install-am install-data install-data-am install-dvi \
- install-dvi-am install-exec install-exec-am install-html \
- install-html-am install-info install-info-am \
- install-libLTLIBRARIES install-libwebpdecodeincludeHEADERS \
- install-man install-pdf install-pdf-am install-ps \
- install-ps-am install-strip installcheck installcheck-am \
- installdirs maintainer-clean maintainer-clean-generic \
- mostlyclean mostlyclean-compile mostlyclean-generic \
- mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \
- uninstall-am uninstall-libLTLIBRARIES \
- uninstall-libwebpdecodeincludeHEADERS
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/src/enc/Android.mk b/src/enc/Android.mk
new file mode 100644
index 00000000..d032d0aa
--- /dev/null
+++ b/src/enc/Android.mk
@@ -0,0 +1,41 @@
+# Copyright 2010 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := \
+ analysis.c \
+ bit_writer.c \
+ config.c \
+ cost.c \
+ dsp.c \
+ filter.c \
+ frame.c \
+ iterator.c \
+ picture.c \
+ quant.c \
+ syntax.c \
+ tree.c \
+ webpenc.c
+
+LOCAL_CFLAGS := -DANDROID
+
+LOCAL_C_INCLUDES += \
+ $(LOCAL_PATH) \
+ $(LOCAL_PATH)/../../include
+
+LOCAL_MODULE:= libwebp-encode
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/src/enc/analysis.c b/src/enc/analysis.c
new file mode 100644
index 00000000..41e12e87
--- /dev/null
+++ b/src/enc/analysis.c
@@ -0,0 +1,399 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Macroblock analysis
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "vp8enci.h"
+#include "cost.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#define MAX_COEFF_THRESH 64
+#define MAX_ITERS_K_MEANS 6
+
+//-----------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+
+static int ClipAlpha(int alpha) {
+ return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
+}
+
+static int GetAlpha(const int histo[MAX_COEFF_THRESH]) {
+ int num = 0, den = 0, val = 0;
+ int k;
+ int alpha;
+ for (k = 0; k < MAX_COEFF_THRESH; ++k) {
+ if (histo[k]) {
+ val += histo[k];
+ num += val * (k + 1);
+ den += (k + 1) * (k + 1);
+ }
+ }
+ // we scale the value to a usable [0..255] range
+ alpha = den ? 10 * num / den - 5 : 0;
+ return ClipAlpha(alpha);
+}
+
+static int CollectHistogram(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block) {
+ int histo[MAX_COEFF_THRESH] = { 0 };
+ int16_t out[16];
+ int j, k;
+ for (j = start_block; j < end_block; ++j) {
+ VP8FTransform(ref + VP8Scan[j], pred + VP8Scan[j], out);
+ for (k = 0; k < 16; ++k) {
+ const int v = abs(out[k]) >> 2;
+ if (v) {
+ const int bin = (v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v;
+ histo[bin - 1]++;
+ }
+ }
+ }
+ return GetAlpha(histo);
+}
+
+//-----------------------------------------------------------------------------
+// Smooth the segment map by replacing isolated block by the majority of its
+// neighbours.
+
+static void SmoothSegmentMap(VP8Encoder* const enc) {
+ int n, x, y;
+ const int w = enc->mb_w_;
+ const int h = enc->mb_h_;
+ const int majority_cnt_3_x_3_grid = 5;
+ uint8_t* tmp = (uint8_t*)malloc(w * h * sizeof(uint8_t));
+
+ if (tmp == NULL) return;
+ for (y = 1; y < h - 1; ++y) {
+ for (x = 1; x < w - 1; ++x) {
+ int cnt[NUM_MB_SEGMENTS] = { 0 };
+ const VP8MBInfo* const mb = &enc->mb_info_[x + w * y];
+ int majority_seg = mb->segment_;
+ // Check the 8 neighbouring segment values.
+ cnt[mb[-w - 1].segment_]++; // top-left
+ cnt[mb[-w + 0].segment_]++; // top
+ cnt[mb[-w + 1].segment_]++; // top-right
+ cnt[mb[ - 1].segment_]++; // left
+ cnt[mb[ + 1].segment_]++; // right
+ cnt[mb[ w - 1].segment_]++; // bottom-left
+ cnt[mb[ w + 0].segment_]++; // bottom
+ cnt[mb[ w + 1].segment_]++; // bottom-right
+ for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
+ if (cnt[n] >= majority_cnt_3_x_3_grid) {
+ majority_seg = n;
+ }
+ }
+ tmp[x + y * w] = majority_seg;
+ }
+ }
+ for (y = 1; y < h - 1; ++y) {
+ for (x = 1; x < w - 1; ++x) {
+ VP8MBInfo* const mb = &enc->mb_info_[x + w * y];
+ mb->segment_ = tmp[x + y * w];
+ }
+ }
+ free(tmp);
+}
+
+//-----------------------------------------------------------------------------
+// Finalize Segment probability based on the coding tree
+
+static int GetProba(int a, int b) {
+ int proba;
+ const int total = a + b;
+ if (total == 0) return 255; // that's the default probability.
+ proba = (255 * a + total / 2) / total;
+ return proba;
+}
+
+static void SetSegmentProbas(VP8Encoder* const enc) {
+ int p[NUM_MB_SEGMENTS] = { 0 };
+ int n;
+
+ for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
+ const VP8MBInfo* const mb = &enc->mb_info_[n];
+ p[mb->segment_]++;
+ }
+ if (enc->pic_->stats) {
+ for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
+ enc->pic_->stats->segment_size[n] = p[n];
+ }
+ }
+ if (enc->segment_hdr_.num_segments_ > 1) {
+ uint8_t* const probas = enc->proba_.segments_;
+ probas[0] = GetProba(p[0] + p[1], p[2] + p[3]);
+ probas[1] = GetProba(p[0], p[1]);
+ probas[2] = GetProba(p[2], p[3]);
+
+ enc->segment_hdr_.update_map_ =
+ (probas[0] != 255) || (probas[1] != 255) || (probas[2] != 255);
+ enc->segment_hdr_.size_ =
+ p[0] * (VP8BitCost(0, probas[0]) + VP8BitCost(0, probas[1])) +
+ p[1] * (VP8BitCost(0, probas[0]) + VP8BitCost(1, probas[1])) +
+ p[2] * (VP8BitCost(1, probas[0]) + VP8BitCost(0, probas[2])) +
+ p[3] * (VP8BitCost(1, probas[0]) + VP8BitCost(1, probas[2]));
+ } else {
+ enc->segment_hdr_.update_map_ = 0;
+ enc->segment_hdr_.size_ = 0;
+ }
+}
+
+static inline int clip(int v, int m, int M) {
+ return v < m ? m : v > M ? M : v;
+}
+
+static void SetSegmentAlphas(VP8Encoder* const enc,
+ const int centers[NUM_MB_SEGMENTS],
+ int mid) {
+ const int nb = enc->segment_hdr_.num_segments_;
+ int min = centers[0], max = centers[0];
+ int n;
+
+ if (nb > 1) {
+ for (n = 0; n < nb; ++n) {
+ if (min > centers[n]) min = centers[n];
+ if (max < centers[n]) max = centers[n];
+ }
+ }
+ if (max == min) max = min + 1;
+ assert(mid <= max && mid >= min);
+ for (n = 0; n < nb; ++n) {
+ const int alpha = 255 * (centers[n] - mid) / (max - min);
+ const int beta = 255 * (centers[n] - min) / (max - min);
+ enc->dqm_[n].alpha_ = clip(alpha, -127, 127);
+ enc->dqm_[n].beta_ = clip(beta, 0, 255);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Simplified k-Means, to assign Nb segments based on alpha-histogram
+
+static void AssignSegments(VP8Encoder* const enc, const int alphas[256]) {
+ const int nb = enc->segment_hdr_.num_segments_;
+ int centers[NUM_MB_SEGMENTS];
+ int weighted_average;
+ int map[256];
+ int a, n, k;
+ int min_a = 0, max_a = 255, range_a;
+ // 'int' type is ok for histo, and won't overflow
+ int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];
+
+ // bracket the input
+ for (n = 0; n < 256 && alphas[n] == 0; ++n) {}
+ min_a = n;
+ for (n = 255; n > min_a && alphas[n] == 0; --n) {}
+ max_a = n;
+ range_a = max_a - min_a;
+
+ // Spread initial centers evenly
+ for (n = 1, k = 0; n < 2 * nb; n += 2) {
+ centers[k++] = min_a + (n * range_a) / (2 * nb);
+ }
+
+ for (k = 0; k < MAX_ITERS_K_MEANS; ++k) { // few iters are enough
+ int total_weight;
+ int displaced;
+ // Reset stats
+ for (n = 0; n < nb; ++n) {
+ accum[n] = 0;
+ dist_accum[n] = 0;
+ }
+ // Assign nearest center for each 'a'
+ n = 0; // track the nearest center for current 'a'
+ for (a = min_a; a <= max_a; ++a) {
+ if (alphas[a]) {
+ while (n < nb - 1 && abs(a - centers[n + 1]) < abs(a - centers[n])) {
+ n++;
+ }
+ map[a] = n;
+ // accumulate contribution into best centroid
+ dist_accum[n] += a * alphas[a];
+ accum[n] += alphas[a];
+ }
+ }
+    // All points are classified. Move the centroids to the
+ // center of their respective cloud.
+ displaced = 0;
+ weighted_average = 0;
+ total_weight = 0;
+ for (n = 0; n < nb; ++n) {
+ if (accum[n]) {
+ const int new_center = (dist_accum[n] + accum[n] / 2) / accum[n];
+ displaced += abs(centers[n] - new_center);
+ centers[n] = new_center;
+ weighted_average += new_center * accum[n];
+ total_weight += accum[n];
+ }
+ }
+ weighted_average = (weighted_average + total_weight / 2) / total_weight;
+ if (displaced < 5) break; // no need to keep on looping...
+ }
+
+ // Map each original value to the closest centroid
+ for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
+ VP8MBInfo* const mb = &enc->mb_info_[n];
+ const int a = mb->alpha_;
+ mb->segment_ = map[a];
+ mb->alpha_ = centers[map[a]]; // just for the record.
+ }
+
+ if (nb > 1) {
+ const int smooth = (enc->config_->preprocessing & 1);
+ if (smooth) SmoothSegmentMap(enc);
+ }
+
+ SetSegmentProbas(enc); // Assign final proba
+ SetSegmentAlphas(enc, centers, weighted_average); // pick some alphas.
+}
+
+//-----------------------------------------------------------------------------
+// Macroblock analysis: collect histogram for each mode, deduce the maximal
+// susceptibility and set best modes for this macroblock.
+// Segment assignment is done later.
+
+// Number of modes to inspect for alpha_ evaluation. For high-quality settings,
+// we don't need to test all the possible modes during the analysis phase.
+#define MAX_INTRA16_MODE 2
+#define MAX_INTRA4_MODE 2
+#define MAX_UV_MODE 2
+
+static int MBAnalyzeBestIntra16Mode(VP8EncIterator* const it) {
+ const int max_mode = (it->enc_->method_ >= 3) ? MAX_INTRA16_MODE : 4;
+ int mode;
+ int best_alpha = -1;
+ int best_mode = 0;
+
+ VP8MakeLuma16Preds(it);
+ for (mode = 0; mode < max_mode; ++mode) {
+ const int alpha = CollectHistogram(it->yuv_in_ + Y_OFF,
+ it->yuv_p_ + VP8I16ModeOffsets[mode],
+ 0, 16);
+ if (alpha > best_alpha) {
+ best_alpha = alpha;
+ best_mode = mode;
+ }
+ }
+ VP8SetIntra16Mode(it, best_mode);
+ return best_alpha;
+}
+
+static int MBAnalyzeBestIntra4Mode(VP8EncIterator* const it,
+ int best_alpha) {
+ int modes[16];
+ const int max_mode = (it->enc_->method_ >= 3) ? MAX_INTRA4_MODE : NUM_BMODES;
+ int i4_alpha = 0;
+ VP8IteratorStartI4(it);
+ do {
+ int mode;
+ int best_mode_alpha = -1;
+ const uint8_t* const src = it->yuv_in_ + Y_OFF + VP8Scan[it->i4_];
+
+ VP8MakeIntra4Preds(it);
+ for (mode = 0; mode < max_mode; ++mode) {
+ const int alpha = CollectHistogram(src,
+ it->yuv_p_ + VP8I4ModeOffsets[mode],
+ 0, 1);
+ if (alpha > best_mode_alpha) {
+ best_mode_alpha = alpha;
+ modes[it->i4_] = mode;
+ }
+ }
+ i4_alpha += best_mode_alpha;
+ // Note: we reuse the original samples for predictors
+ } while (VP8IteratorRotateI4(it, it->yuv_in_ + Y_OFF));
+
+ if (i4_alpha > best_alpha) {
+ VP8SetIntra4Mode(it, modes);
+ best_alpha = ClipAlpha(i4_alpha);
+ }
+ return best_alpha;
+}
+
+static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
+ int best_alpha = -1;
+ int best_mode = 0;
+ const int max_mode = (it->enc_->method_ >= 3) ? MAX_UV_MODE : 4;
+ int mode;
+ VP8MakeChroma8Preds(it);
+ for (mode = 0; mode < max_mode; ++mode) {
+ const int alpha = CollectHistogram(it->yuv_in_ + U_OFF,
+ it->yuv_p_ + VP8UVModeOffsets[mode],
+ 16, 16 + 4 + 4);
+ if (alpha > best_alpha) {
+ best_alpha = alpha;
+ best_mode = mode;
+ }
+ }
+ VP8SetIntraUVMode(it, best_mode);
+ return best_alpha;
+}
+
+static void MBAnalyze(VP8EncIterator* const it,
+ int alphas[256], int* const uv_alpha) {
+ const VP8Encoder* const enc = it->enc_;
+ int best_alpha, best_uv_alpha;
+
+ VP8SetIntra16Mode(it, 0); // default: Intra16, DC_PRED
+ VP8SetSkip(it, 0); // not skipped
+ VP8SetSegment(it, 0); // default segment, spec-wise.
+
+ best_alpha = MBAnalyzeBestIntra16Mode(it);
+ if (enc->method_ != 3) {
+ // We go and make a fast decision for intra4/intra16.
+ // It's usually not a good and definitive pick, but helps seeding the stats
+ // about level bit-cost.
+ // TODO(skal): improve criterion.
+ best_alpha = MBAnalyzeBestIntra4Mode(it, best_alpha);
+ }
+ best_uv_alpha = MBAnalyzeBestUVMode(it);
+
+ // Final susceptibility mix
+ best_alpha = (best_alpha + best_uv_alpha + 1) / 2;
+ alphas[best_alpha]++;
+ *uv_alpha += best_uv_alpha;
+ it->mb_->alpha_ = best_alpha; // Informative only.
+}
+
+//-----------------------------------------------------------------------------
+// Main analysis loop:
+// Collect all susceptibilities for each macroblock and record their
+// distribution in alphas[]. Segments are assigned a posteriori, based on
+// this histogram.
+// We also pick an intra16 prediction mode, which shouldn't be considered
+// final except for fast-encode settings. We can also pick some intra4 modes
+// and decide intra4/intra16, but that's usually almost always a bad choice at
+// this stage.
+
+int VP8EncAnalyze(VP8Encoder* const enc) {
+ int alphas[256] = { 0 };
+ VP8EncIterator it;
+
+ VP8IteratorInit(enc, &it);
+ enc->uv_alpha_ = 0;
+ do {
+ VP8IteratorImport(&it);
+ MBAnalyze(&it, alphas, &enc->uv_alpha_);
+ // Let's pretend we have perfect lossless reconstruction.
+ } while (VP8IteratorNext(&it, it.yuv_in_));
+ enc->uv_alpha_ /= enc->mb_w_ * enc->mb_h_;
+ AssignSegments(enc, alphas);
+
+ return 1;
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
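
The analysis pass above scores each macroblock with an "alpha" susceptibility
derived from a histogram of transformed-coefficient magnitudes
(CollectHistogram/GetAlpha), then buckets macroblocks into segments with the
simplified k-means. A self-contained sketch restating just the alpha scoring
from GetAlpha()/ClipAlpha() on a toy histogram, to show the direction of the
score (many small coefficients score high, i.e. "easy"; larger coefficients
pull the score down); illustrative only, not part of the patch:

    #include <stdio.h>

    #define MAX_COEFF_THRESH 64

    static int ClipAlpha(int alpha) {
      return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
    }

    static int GetAlpha(const int histo[MAX_COEFF_THRESH]) {
      int num = 0, den = 0, val = 0;
      int k;
      for (k = 0; k < MAX_COEFF_THRESH; ++k) {
        if (histo[k]) {
          val += histo[k];           /* running total of coefficient counts */
          num += val * (k + 1);      /* weighted by position in the histogram */
          den += (k + 1) * (k + 1);  /* squared index of each occupied bin */
        }
      }
      return ClipAlpha(den ? 10 * num / den - 5 : 0);  /* scale to [0..255] */
    }

    int main(void) {
      int histo[MAX_COEFF_THRESH] = { 0 };
      histo[0] = 12;   /* only very small coefficients: "easy" block */
      printf("easy block: alpha = %d\n", GetAlpha(histo));   /* prints 115 */
      histo[20] = 8;   /* some larger coefficients: "harder" block */
      printf("hard block: alpha = %d\n", GetAlpha(histo));   /* prints 4 */
      return 0;
    }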
diff --git a/src/enc/bit_writer.c b/src/enc/bit_writer.c
new file mode 100644
index 00000000..3656a7e2
--- /dev/null
+++ b/src/enc/bit_writer.c
@@ -0,0 +1,175 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Bit writing and boolean coder
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// VP8BitWriter
+
+static int BitWriterResize(VP8BitWriter* const bw, size_t extra_size) {
+ uint8_t* new_buf;
+ size_t new_size;
+ const size_t needed_size = bw->pos_ + extra_size;
+ if (needed_size <= bw->max_pos_) return 1;
+ new_size = 2 * bw->max_pos_;
+ if (new_size < needed_size)
+ new_size = needed_size;
+ if (new_size < 1024) new_size = 1024;
+ new_buf = (uint8_t*)malloc(new_size);
+ if (new_buf == NULL) {
+ bw->error_ = 1;
+ return 0;
+ }
+ if (bw->pos_ > 0) memcpy(new_buf, bw->buf_, bw->pos_);
+ free(bw->buf_);
+ bw->buf_ = new_buf;
+ bw->max_pos_ = new_size;
+ return 1;
+}
+
+static void kFlush(VP8BitWriter* const bw) {
+ const int s = 8 + bw->nb_bits_;
+ const int32_t bits = bw->value_ >> s;
+ assert(bw->nb_bits_ >= 0);
+ bw->value_ -= bits << s;
+ bw->nb_bits_ -= 8;
+ if ((bits & 0xff) != 0xff) {
+ size_t pos = bw->pos_;
+ if (pos + bw->run_ >= bw->max_pos_) { // reallocate
+ if (!BitWriterResize(bw, bw->run_ + 1)) {
+ return;
+ }
+ }
+ if (bits & 0x100) { // overflow -> propagate carry over pending 0xff's
+ if (pos > 0) bw->buf_[pos - 1]++;
+ }
+ if (bw->run_ > 0) {
+ const int value = (bits & 0x100) ? 0x00 : 0xff;
+ for (; bw->run_ > 0; --bw->run_) bw->buf_[pos++] = value;
+ }
+ bw->buf_[pos++] = bits;
+ bw->pos_ = pos;
+ } else {
+ bw->run_++; // delay writing of bytes 0xff, pending eventual carry.
+ }
+}
+
+//-----------------------------------------------------------------------------
+// renormalization
+
+static const uint8_t kNorm[128] = { // renorm_sizes[i] = 8 - log2(i)
+ 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0
+};
+
+// range = ((range + 1) << kVP8Log2Range[range]) - 1
+const uint8_t kNewRange[128] = {
+ 127, 127, 191, 127, 159, 191, 223, 127, 143, 159, 175, 191, 207, 223, 239,
+ 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239,
+ 247, 127, 131, 135, 139, 143, 147, 151, 155, 159, 163, 167, 171, 175, 179,
+ 183, 187, 191, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239,
+ 243, 247, 251, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149,
+ 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179,
+ 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209,
+ 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239,
+ 241, 243, 245, 247, 249, 251, 253, 127
+};
+
+int VP8PutBit(VP8BitWriter* const bw, int bit, int prob) {
+ const int split = (bw->range_ * prob) >> 8;
+ if (bit) {
+ bw->value_ += split + 1;
+ bw->range_ -= split + 1;
+ } else {
+ bw->range_ = split;
+ }
+ if (bw->range_ < 127) { // emit 'shift' bits out and renormalize
+ const int shift = kNorm[bw->range_];
+ bw->range_ = kNewRange[bw->range_];
+ bw->value_ <<= shift;
+ bw->nb_bits_ += shift;
+ if (bw->nb_bits_ > 0) kFlush(bw);
+ }
+ return bit;
+}
+
+int VP8PutBitUniform(VP8BitWriter* const bw, int bit) {
+ const int split = bw->range_ >> 1;
+ if (bit) {
+ bw->value_ += split + 1;
+ bw->range_ -= split + 1;
+ } else {
+ bw->range_ = split;
+ }
+ if (bw->range_ < 127) {
+ bw->range_ = kNewRange[bw->range_];
+ bw->value_ <<= 1;
+ bw->nb_bits_ += 1;
+ if (bw->nb_bits_ > 0) kFlush(bw);
+ }
+ return bit;
+}
+
+void VP8PutValue(VP8BitWriter* const bw, int value, int nb_bits) {
+ int mask;
+ for (mask = 1 << (nb_bits - 1); mask; mask >>= 1)
+ VP8PutBitUniform(bw, value & mask);
+}
+
+void VP8PutSignedValue(VP8BitWriter* const bw, int value, int nb_bits) {
+ if (!VP8PutBitUniform(bw, value != 0))
+ return;
+ if (value < 0) {
+ VP8PutValue(bw, ((-value) << 1) | 1, nb_bits + 1);
+ } else {
+ VP8PutValue(bw, value << 1, nb_bits + 1);
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+int VP8BitWriterInit(VP8BitWriter* const bw, size_t expected_size) {
+ bw->range_ = 255 - 1;
+ bw->value_ = 0;
+ bw->run_ = 0;
+ bw->nb_bits_ = -8;
+ bw->pos_ = 0;
+ bw->max_pos_ = 0;
+ bw->error_ = 0;
+ bw->buf_ = NULL;
+ return (expected_size > 0) ? BitWriterResize(bw, expected_size) : 1;
+}
+
+uint8_t* VP8BitWriterFinish(VP8BitWriter* const bw) {
+ VP8PutValue(bw, 0, 9 - bw->nb_bits_);
+ bw->nb_bits_ = 0; // pad with zeroes
+ kFlush(bw);
+ return bw->buf_;
+}
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
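
The functions above implement VP8's boolean arithmetic coder: VP8PutBit()
splits range_ in proportion to the probability of a zero bit, and kFlush()
renormalizes, propagating any carry through the run of pending 0xff bytes.
A minimal driver for this writer, using only the calls declared in
bit_writer.h below; the bit pattern and probability are arbitrary
illustration values, not something this patch adds:

    #include <stdio.h>
    #include <stdlib.h>
    #include "bit_writer.h"

    int main(void) {
      VP8BitWriter bw;
      int i;
      if (!VP8BitWriterInit(&bw, 16)) return 1;  /* pre-allocate ~16 bytes */

      /* A biased stream: '0' is expected with probability ~220/256, so the
         coder spends much less than one bit per '0' on average. */
      for (i = 0; i < 100; ++i) {
        VP8PutBit(&bw, (i % 10 == 0), 220);
      }
      VP8PutValue(&bw, 0x5a, 8);       /* 8 literal bits, uniform probability */
      VP8PutSignedValue(&bw, -3, 4);   /* sign + 4 magnitude bits */

      {
        uint8_t* const buf = VP8BitWriterFinish(&bw);
        printf("wrote %u bytes (error=%d)\n",
               (unsigned)VP8BitWriterSize(&bw), bw.error_);
        free(buf);   /* buffer was malloc()'ed by BitWriterResize() */
      }
      return 0;
    }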
diff --git a/src/enc/bit_writer.h b/src/enc/bit_writer.h
new file mode 100644
index 00000000..3773c9cb
--- /dev/null
+++ b/src/enc/bit_writer.h
@@ -0,0 +1,61 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Bit writing and boolean coder
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_ENC_BIT_WRITER_H_
+#define WEBP_ENC_BIT_WRITER_H_
+
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// Bit-writing
+
+typedef struct VP8BitWriter VP8BitWriter;
+struct VP8BitWriter {
+ int32_t range_; // range-1
+ int32_t value_;
+ int run_; // number of outstanding bits
+ int nb_bits_; // number of pending bits
+ uint8_t* buf_;
+ size_t pos_;
+ size_t max_pos_;
+ int error_; // true in case of error
+};
+
+int VP8BitWriterInit(VP8BitWriter* const bw, size_t expected_size);
+uint8_t* VP8BitWriterFinish(VP8BitWriter* const bw);
+int VP8PutBit(VP8BitWriter* const bw, int bit, int prob);
+int VP8PutBitUniform(VP8BitWriter* const bw, int bit);
+void VP8PutValue(VP8BitWriter* const bw, int value, int nb_bits);
+void VP8PutSignedValue(VP8BitWriter* const bw, int value, int nb_bits);
+
+// return approximate write position (in bits)
+static inline uint64_t VP8BitWriterPos(const VP8BitWriter* const bw) {
+ return (uint64_t)(bw->pos_ + bw->run_) * 8 + 8 + bw->nb_bits_;
+}
+
+static inline uint8_t* VP8BitWriterBuf(const VP8BitWriter* const bw) {
+ return bw->buf_;
+}
+static inline size_t VP8BitWriterSize(const VP8BitWriter* const bw) {
+ return bw->pos_;
+}
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // WEBP_ENC_BIT_WRITER_H_
diff --git a/src/enc/config.c b/src/enc/config.c
new file mode 100644
index 00000000..86ef5ce2
--- /dev/null
+++ b/src/enc/config.c
@@ -0,0 +1,115 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Coding tools configuration
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include "webp/encode.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// WebPConfig
+//-----------------------------------------------------------------------------
+
+int WebPConfigInitInternal(WebPConfig* const config,
+ WebPPreset preset, float quality, int version) {
+ if (version != WEBP_ENCODER_ABI_VERSION) {
+ return 0; // caller/system version mismatch!
+ }
+ if (config == NULL) return 0;
+
+ config->quality = quality;
+ config->target_size = 0;
+ config->target_PSNR = 0.;
+ config->method = 4;
+ config->sns_strength = 50;
+ config->filter_strength = 20; // default: light filtering
+ config->filter_sharpness = 0;
+ config->filter_type = 0; // default: simple
+ config->partitions = 0;
+ config->segments = 4;
+ config->pass = 1;
+ config->show_compressed = 0;
+ config->preprocessing = 0;
+ config->autofilter = 0;
+
+ // TODO(skal): tune.
+ switch (preset) {
+ case WEBP_PRESET_PICTURE:
+ config->sns_strength = 80;
+ config->filter_sharpness = 4;
+ config->filter_strength = 35;
+ break;
+ case WEBP_PRESET_PHOTO:
+ config->sns_strength = 80;
+ config->filter_sharpness = 3;
+ config->filter_strength = 30;
+ break;
+ case WEBP_PRESET_DRAWING:
+ config->sns_strength = 25;
+ config->filter_sharpness = 6;
+ config->filter_strength = 10;
+ break;
+ case WEBP_PRESET_ICON:
+ config->sns_strength = 0;
+ config->filter_strength = 0; // disable filtering to retain sharpness
+ break;
+ case WEBP_PRESET_TEXT:
+ config->sns_strength = 0;
+ config->filter_strength = 0; // disable filtering to retain sharpness
+ config->segments = 2;
+ break;
+ case WEBP_PRESET_DEFAULT:
+ default:
+ break;
+ }
+ return WebPValidateConfig(config);
+}
+
+int WebPValidateConfig(const WebPConfig* const config) {
+ if (config == NULL) return 0;
+ if (config->quality < 0 || config->quality > 100)
+ return 0;
+ if (config->target_size < 0)
+ return 0;
+ if (config->target_PSNR < 0)
+ return 0;
+ if (config->method < 0 || config->method > 6)
+ return 0;
+ if (config->segments < 1 || config->segments > 4)
+ return 0;
+ if (config->sns_strength < 0 || config->sns_strength > 100)
+ return 0;
+ if (config->filter_strength < 0 || config->filter_strength > 100)
+ return 0;
+ if (config->filter_sharpness < 0 || config->filter_sharpness > 7)
+ return 0;
+ if (config->filter_type < 0 || config->filter_type > 1)
+ return 0;
+ if (config->autofilter < 0 || config->autofilter > 1)
+ return 0;
+ if (config->pass < 1 || config->pass > 10)
+ return 0;
+ if (config->show_compressed < 0 || config->show_compressed > 1)
+ return 0;
+ if (config->preprocessing < 0 || config->preprocessing > 1)
+ return 0;
+ if (config->partitions < 0 || config->partitions > 3)
+ return 0;
+ return 1;
+}
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
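
WebPConfigInitInternal() above is versioned: the caller passes
WEBP_ENCODER_ABI_VERSION so that a mismatch between headers and library is
rejected instead of silently reading a different struct layout. A short,
hedged sketch of filling a config from a preset and overriding a couple of
fields before re-validating; it calls the internal entry point directly to
stay within what this file defines (applications would normally go through
wrapper macros in webp/encode.h, which is an assumption here):

    #include <stdio.h>
    #include "webp/encode.h"

    int main(void) {
      WebPConfig config;
      if (!WebPConfigInitInternal(&config, WEBP_PRESET_PHOTO, 75.f,
                                  WEBP_ENCODER_ABI_VERSION)) {
        fprintf(stderr, "version mismatch or bad parameters\n");
        return 1;
      }
      /* Tweak a few knobs, then re-check the ranges enforced by
         WebPValidateConfig(): method in [0..6], pass in [1..10], etc. */
      config.method = 6;   /* slowest / best compression */
      config.pass = 2;
      if (!WebPValidateConfig(&config)) {
        fprintf(stderr, "invalid config\n");
        return 1;
      }
      printf("sns_strength=%d filter_strength=%d segments=%d\n",
             config.sns_strength, config.filter_strength, config.segments);
      return 0;
    }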
diff --git a/src/enc/cost.c b/src/enc/cost.c
new file mode 100644
index 00000000..f765598a
--- /dev/null
+++ b/src/enc/cost.c
@@ -0,0 +1,491 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Cost tables for level and modes
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+
+#include "cost.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// Boolean-cost cost table
+
+const uint16_t VP8EntropyCost[256] = {
+ 1792, 1792, 1792, 1536, 1536, 1408, 1366, 1280, 1280, 1216,
+ 1178, 1152, 1110, 1076, 1061, 1024, 1024, 992, 968, 951,
+ 939, 911, 896, 878, 871, 854, 838, 820, 811, 794,
+ 786, 768, 768, 752, 740, 732, 720, 709, 704, 690,
+ 683, 672, 666, 655, 647, 640, 631, 622, 615, 607,
+ 598, 592, 586, 576, 572, 564, 559, 555, 547, 541,
+ 534, 528, 522, 512, 512, 504, 500, 494, 488, 483,
+ 477, 473, 467, 461, 458, 452, 448, 443, 438, 434,
+ 427, 424, 419, 415, 410, 406, 403, 399, 394, 390,
+ 384, 384, 377, 374, 370, 366, 362, 359, 355, 351,
+ 347, 342, 342, 336, 333, 330, 326, 323, 320, 316,
+ 312, 308, 305, 302, 299, 296, 293, 288, 287, 283,
+ 280, 277, 274, 272, 268, 266, 262, 256, 256, 256,
+ 251, 248, 245, 242, 240, 237, 234, 232, 228, 226,
+ 223, 221, 218, 216, 214, 211, 208, 205, 203, 201,
+ 198, 196, 192, 191, 188, 187, 183, 181, 179, 176,
+ 175, 171, 171, 168, 165, 163, 160, 159, 156, 154,
+ 152, 150, 148, 146, 144, 142, 139, 138, 135, 133,
+ 131, 128, 128, 125, 123, 121, 119, 117, 115, 113,
+ 111, 110, 107, 105, 103, 102, 100, 98, 96, 94,
+ 92, 91, 89, 86, 86, 83, 82, 80, 77, 76,
+ 74, 73, 71, 69, 67, 66, 64, 63, 61, 59,
+ 57, 55, 54, 52, 51, 49, 47, 46, 44, 43,
+ 41, 40, 38, 36, 35, 33, 32, 30, 29, 27,
+ 25, 24, 22, 21, 19, 18, 16, 15, 13, 12,
+ 10, 9, 7, 6, 4, 3
+};
+
+//-----------------------------------------------------------------------------
+// Level cost tables
+
+// For each given level, the following table gives the pattern of contexts
+// to use for coding it (in [][0]) as well as the bit value to use for
+// each context (in [][1]).
+static const uint16_t kLevelCodes[MAX_VARIABLE_LEVEL][2] = {
+ {0x001, 0x000}, {0x007, 0x001}, {0x00f, 0x005},
+ {0x00f, 0x00d}, {0x033, 0x003}, {0x033, 0x003}, {0x033, 0x023},
+ {0x033, 0x023}, {0x033, 0x023}, {0x033, 0x023}, {0x0d3, 0x013},
+ {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x013},
+ {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x093},
+ {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093},
+ {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093},
+ {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093},
+ {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053},
+ {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x153}
+};
+
+// fixed costs for coding levels, deduced from the coding tree.
+// This is only the part that doesn't depend on the probability state.
+const uint16_t VP8LevelFixedCosts[2048] = {
+ 0, 256, 256, 256, 256, 432, 618, 630,
+ 731, 640, 640, 828, 901, 948, 1021, 1101,
+ 1174, 1221, 1294, 1042, 1085, 1115, 1158, 1202,
+ 1245, 1275, 1318, 1337, 1380, 1410, 1453, 1497,
+ 1540, 1570, 1613, 1280, 1295, 1317, 1332, 1358,
+ 1373, 1395, 1410, 1454, 1469, 1491, 1506, 1532,
+ 1547, 1569, 1584, 1601, 1616, 1638, 1653, 1679,
+ 1694, 1716, 1731, 1775, 1790, 1812, 1827, 1853,
+ 1868, 1890, 1905, 1727, 1733, 1742, 1748, 1759,
+ 1765, 1774, 1780, 1800, 1806, 1815, 1821, 1832,
+ 1838, 1847, 1853, 1878, 1884, 1893, 1899, 1910,
+ 1916, 1925, 1931, 1951, 1957, 1966, 1972, 1983,
+ 1989, 1998, 2004, 2027, 2033, 2042, 2048, 2059,
+ 2065, 2074, 2080, 2100, 2106, 2115, 2121, 2132,
+ 2138, 2147, 2153, 2178, 2184, 2193, 2199, 2210,
+ 2216, 2225, 2231, 2251, 2257, 2266, 2272, 2283,
+ 2289, 2298, 2304, 2168, 2174, 2183, 2189, 2200,
+ 2206, 2215, 2221, 2241, 2247, 2256, 2262, 2273,
+ 2279, 2288, 2294, 2319, 2325, 2334, 2340, 2351,
+ 2357, 2366, 2372, 2392, 2398, 2407, 2413, 2424,
+ 2430, 2439, 2445, 2468, 2474, 2483, 2489, 2500,
+ 2506, 2515, 2521, 2541, 2547, 2556, 2562, 2573,
+ 2579, 2588, 2594, 2619, 2625, 2634, 2640, 2651,
+ 2657, 2666, 2672, 2692, 2698, 2707, 2713, 2724,
+ 2730, 2739, 2745, 2540, 2546, 2555, 2561, 2572,
+ 2578, 2587, 2593, 2613, 2619, 2628, 2634, 2645,
+ 2651, 2660, 2666, 2691, 2697, 2706, 2712, 2723,
+ 2729, 2738, 2744, 2764, 2770, 2779, 2785, 2796,
+ 2802, 2811, 2817, 2840, 2846, 2855, 2861, 2872,
+ 2878, 2887, 2893, 2913, 2919, 2928, 2934, 2945,
+ 2951, 2960, 2966, 2991, 2997, 3006, 3012, 3023,
+ 3029, 3038, 3044, 3064, 3070, 3079, 3085, 3096,
+ 3102, 3111, 3117, 2981, 2987, 2996, 3002, 3013,
+ 3019, 3028, 3034, 3054, 3060, 3069, 3075, 3086,
+ 3092, 3101, 3107, 3132, 3138, 3147, 3153, 3164,
+ 3170, 3179, 3185, 3205, 3211, 3220, 3226, 3237,
+ 3243, 3252, 3258, 3281, 3287, 3296, 3302, 3313,
+ 3319, 3328, 3334, 3354, 3360, 3369, 3375, 3386,
+ 3392, 3401, 3407, 3432, 3438, 3447, 3453, 3464,
+ 3470, 3479, 3485, 3505, 3511, 3520, 3526, 3537,
+ 3543, 3552, 3558, 2816, 2822, 2831, 2837, 2848,
+ 2854, 2863, 2869, 2889, 2895, 2904, 2910, 2921,
+ 2927, 2936, 2942, 2967, 2973, 2982, 2988, 2999,
+ 3005, 3014, 3020, 3040, 3046, 3055, 3061, 3072,
+ 3078, 3087, 3093, 3116, 3122, 3131, 3137, 3148,
+ 3154, 3163, 3169, 3189, 3195, 3204, 3210, 3221,
+ 3227, 3236, 3242, 3267, 3273, 3282, 3288, 3299,
+ 3305, 3314, 3320, 3340, 3346, 3355, 3361, 3372,
+ 3378, 3387, 3393, 3257, 3263, 3272, 3278, 3289,
+ 3295, 3304, 3310, 3330, 3336, 3345, 3351, 3362,
+ 3368, 3377, 3383, 3408, 3414, 3423, 3429, 3440,
+ 3446, 3455, 3461, 3481, 3487, 3496, 3502, 3513,
+ 3519, 3528, 3534, 3557, 3563, 3572, 3578, 3589,
+ 3595, 3604, 3610, 3630, 3636, 3645, 3651, 3662,
+ 3668, 3677, 3683, 3708, 3714, 3723, 3729, 3740,
+ 3746, 3755, 3761, 3781, 3787, 3796, 3802, 3813,
+ 3819, 3828, 3834, 3629, 3635, 3644, 3650, 3661,
+ 3667, 3676, 3682, 3702, 3708, 3717, 3723, 3734,
+ 3740, 3749, 3755, 3780, 3786, 3795, 3801, 3812,
+ 3818, 3827, 3833, 3853, 3859, 3868, 3874, 3885,
+ 3891, 3900, 3906, 3929, 3935, 3944, 3950, 3961,
+ 3967, 3976, 3982, 4002, 4008, 4017, 4023, 4034,
+ 4040, 4049, 4055, 4080, 4086, 4095, 4101, 4112,
+ 4118, 4127, 4133, 4153, 4159, 4168, 4174, 4185,
+ 4191, 4200, 4206, 4070, 4076, 4085, 4091, 4102,
+ 4108, 4117, 4123, 4143, 4149, 4158, 4164, 4175,
+ 4181, 4190, 4196, 4221, 4227, 4236, 4242, 4253,
+ 4259, 4268, 4274, 4294, 4300, 4309, 4315, 4326,
+ 4332, 4341, 4347, 4370, 4376, 4385, 4391, 4402,
+ 4408, 4417, 4423, 4443, 4449, 4458, 4464, 4475,
+ 4481, 4490, 4496, 4521, 4527, 4536, 4542, 4553,
+ 4559, 4568, 4574, 4594, 4600, 4609, 4615, 4626,
+ 4632, 4641, 4647, 3515, 3521, 3530, 3536, 3547,
+ 3553, 3562, 3568, 3588, 3594, 3603, 3609, 3620,
+ 3626, 3635, 3641, 3666, 3672, 3681, 3687, 3698,
+ 3704, 3713, 3719, 3739, 3745, 3754, 3760, 3771,
+ 3777, 3786, 3792, 3815, 3821, 3830, 3836, 3847,
+ 3853, 3862, 3868, 3888, 3894, 3903, 3909, 3920,
+ 3926, 3935, 3941, 3966, 3972, 3981, 3987, 3998,
+ 4004, 4013, 4019, 4039, 4045, 4054, 4060, 4071,
+ 4077, 4086, 4092, 3956, 3962, 3971, 3977, 3988,
+ 3994, 4003, 4009, 4029, 4035, 4044, 4050, 4061,
+ 4067, 4076, 4082, 4107, 4113, 4122, 4128, 4139,
+ 4145, 4154, 4160, 4180, 4186, 4195, 4201, 4212,
+ 4218, 4227, 4233, 4256, 4262, 4271, 4277, 4288,
+ 4294, 4303, 4309, 4329, 4335, 4344, 4350, 4361,
+ 4367, 4376, 4382, 4407, 4413, 4422, 4428, 4439,
+ 4445, 4454, 4460, 4480, 4486, 4495, 4501, 4512,
+ 4518, 4527, 4533, 4328, 4334, 4343, 4349, 4360,
+ 4366, 4375, 4381, 4401, 4407, 4416, 4422, 4433,
+ 4439, 4448, 4454, 4479, 4485, 4494, 4500, 4511,
+ 4517, 4526, 4532, 4552, 4558, 4567, 4573, 4584,
+ 4590, 4599, 4605, 4628, 4634, 4643, 4649, 4660,
+ 4666, 4675, 4681, 4701, 4707, 4716, 4722, 4733,
+ 4739, 4748, 4754, 4779, 4785, 4794, 4800, 4811,
+ 4817, 4826, 4832, 4852, 4858, 4867, 4873, 4884,
+ 4890, 4899, 4905, 4769, 4775, 4784, 4790, 4801,
+ 4807, 4816, 4822, 4842, 4848, 4857, 4863, 4874,
+ 4880, 4889, 4895, 4920, 4926, 4935, 4941, 4952,
+ 4958, 4967, 4973, 4993, 4999, 5008, 5014, 5025,
+ 5031, 5040, 5046, 5069, 5075, 5084, 5090, 5101,
+ 5107, 5116, 5122, 5142, 5148, 5157, 5163, 5174,
+ 5180, 5189, 5195, 5220, 5226, 5235, 5241, 5252,
+ 5258, 5267, 5273, 5293, 5299, 5308, 5314, 5325,
+ 5331, 5340, 5346, 4604, 4610, 4619, 4625, 4636,
+ 4642, 4651, 4657, 4677, 4683, 4692, 4698, 4709,
+ 4715, 4724, 4730, 4755, 4761, 4770, 4776, 4787,
+ 4793, 4802, 4808, 4828, 4834, 4843, 4849, 4860,
+ 4866, 4875, 4881, 4904, 4910, 4919, 4925, 4936,
+ 4942, 4951, 4957, 4977, 4983, 4992, 4998, 5009,
+ 5015, 5024, 5030, 5055, 5061, 5070, 5076, 5087,
+ 5093, 5102, 5108, 5128, 5134, 5143, 5149, 5160,
+ 5166, 5175, 5181, 5045, 5051, 5060, 5066, 5077,
+ 5083, 5092, 5098, 5118, 5124, 5133, 5139, 5150,
+ 5156, 5165, 5171, 5196, 5202, 5211, 5217, 5228,
+ 5234, 5243, 5249, 5269, 5275, 5284, 5290, 5301,
+ 5307, 5316, 5322, 5345, 5351, 5360, 5366, 5377,
+ 5383, 5392, 5398, 5418, 5424, 5433, 5439, 5450,
+ 5456, 5465, 5471, 5496, 5502, 5511, 5517, 5528,
+ 5534, 5543, 5549, 5569, 5575, 5584, 5590, 5601,
+ 5607, 5616, 5622, 5417, 5423, 5432, 5438, 5449,
+ 5455, 5464, 5470, 5490, 5496, 5505, 5511, 5522,
+ 5528, 5537, 5543, 5568, 5574, 5583, 5589, 5600,
+ 5606, 5615, 5621, 5641, 5647, 5656, 5662, 5673,
+ 5679, 5688, 5694, 5717, 5723, 5732, 5738, 5749,
+ 5755, 5764, 5770, 5790, 5796, 5805, 5811, 5822,
+ 5828, 5837, 5843, 5868, 5874, 5883, 5889, 5900,
+ 5906, 5915, 5921, 5941, 5947, 5956, 5962, 5973,
+ 5979, 5988, 5994, 5858, 5864, 5873, 5879, 5890,
+ 5896, 5905, 5911, 5931, 5937, 5946, 5952, 5963,
+ 5969, 5978, 5984, 6009, 6015, 6024, 6030, 6041,
+ 6047, 6056, 6062, 6082, 6088, 6097, 6103, 6114,
+ 6120, 6129, 6135, 6158, 6164, 6173, 6179, 6190,
+ 6196, 6205, 6211, 6231, 6237, 6246, 6252, 6263,
+ 6269, 6278, 6284, 6309, 6315, 6324, 6330, 6341,
+ 6347, 6356, 6362, 6382, 6388, 6397, 6403, 6414,
+ 6420, 6429, 6435, 3515, 3521, 3530, 3536, 3547,
+ 3553, 3562, 3568, 3588, 3594, 3603, 3609, 3620,
+ 3626, 3635, 3641, 3666, 3672, 3681, 3687, 3698,
+ 3704, 3713, 3719, 3739, 3745, 3754, 3760, 3771,
+ 3777, 3786, 3792, 3815, 3821, 3830, 3836, 3847,
+ 3853, 3862, 3868, 3888, 3894, 3903, 3909, 3920,
+ 3926, 3935, 3941, 3966, 3972, 3981, 3987, 3998,
+ 4004, 4013, 4019, 4039, 4045, 4054, 4060, 4071,
+ 4077, 4086, 4092, 3956, 3962, 3971, 3977, 3988,
+ 3994, 4003, 4009, 4029, 4035, 4044, 4050, 4061,
+ 4067, 4076, 4082, 4107, 4113, 4122, 4128, 4139,
+ 4145, 4154, 4160, 4180, 4186, 4195, 4201, 4212,
+ 4218, 4227, 4233, 4256, 4262, 4271, 4277, 4288,
+ 4294, 4303, 4309, 4329, 4335, 4344, 4350, 4361,
+ 4367, 4376, 4382, 4407, 4413, 4422, 4428, 4439,
+ 4445, 4454, 4460, 4480, 4486, 4495, 4501, 4512,
+ 4518, 4527, 4533, 4328, 4334, 4343, 4349, 4360,
+ 4366, 4375, 4381, 4401, 4407, 4416, 4422, 4433,
+ 4439, 4448, 4454, 4479, 4485, 4494, 4500, 4511,
+ 4517, 4526, 4532, 4552, 4558, 4567, 4573, 4584,
+ 4590, 4599, 4605, 4628, 4634, 4643, 4649, 4660,
+ 4666, 4675, 4681, 4701, 4707, 4716, 4722, 4733,
+ 4739, 4748, 4754, 4779, 4785, 4794, 4800, 4811,
+ 4817, 4826, 4832, 4852, 4858, 4867, 4873, 4884,
+ 4890, 4899, 4905, 4769, 4775, 4784, 4790, 4801,
+ 4807, 4816, 4822, 4842, 4848, 4857, 4863, 4874,
+ 4880, 4889, 4895, 4920, 4926, 4935, 4941, 4952,
+ 4958, 4967, 4973, 4993, 4999, 5008, 5014, 5025,
+ 5031, 5040, 5046, 5069, 5075, 5084, 5090, 5101,
+ 5107, 5116, 5122, 5142, 5148, 5157, 5163, 5174,
+ 5180, 5189, 5195, 5220, 5226, 5235, 5241, 5252,
+ 5258, 5267, 5273, 5293, 5299, 5308, 5314, 5325,
+ 5331, 5340, 5346, 4604, 4610, 4619, 4625, 4636,
+ 4642, 4651, 4657, 4677, 4683, 4692, 4698, 4709,
+ 4715, 4724, 4730, 4755, 4761, 4770, 4776, 4787,
+ 4793, 4802, 4808, 4828, 4834, 4843, 4849, 4860,
+ 4866, 4875, 4881, 4904, 4910, 4919, 4925, 4936,
+ 4942, 4951, 4957, 4977, 4983, 4992, 4998, 5009,
+ 5015, 5024, 5030, 5055, 5061, 5070, 5076, 5087,
+ 5093, 5102, 5108, 5128, 5134, 5143, 5149, 5160,
+ 5166, 5175, 5181, 5045, 5051, 5060, 5066, 5077,
+ 5083, 5092, 5098, 5118, 5124, 5133, 5139, 5150,
+ 5156, 5165, 5171, 5196, 5202, 5211, 5217, 5228,
+ 5234, 5243, 5249, 5269, 5275, 5284, 5290, 5301,
+ 5307, 5316, 5322, 5345, 5351, 5360, 5366, 5377,
+ 5383, 5392, 5398, 5418, 5424, 5433, 5439, 5450,
+ 5456, 5465, 5471, 5496, 5502, 5511, 5517, 5528,
+ 5534, 5543, 5549, 5569, 5575, 5584, 5590, 5601,
+ 5607, 5616, 5622, 5417, 5423, 5432, 5438, 5449,
+ 5455, 5464, 5470, 5490, 5496, 5505, 5511, 5522,
+ 5528, 5537, 5543, 5568, 5574, 5583, 5589, 5600,
+ 5606, 5615, 5621, 5641, 5647, 5656, 5662, 5673,
+ 5679, 5688, 5694, 5717, 5723, 5732, 5738, 5749,
+ 5755, 5764, 5770, 5790, 5796, 5805, 5811, 5822,
+ 5828, 5837, 5843, 5868, 5874, 5883, 5889, 5900,
+ 5906, 5915, 5921, 5941, 5947, 5956, 5962, 5973,
+ 5979, 5988, 5994, 5858, 5864, 5873, 5879, 5890,
+ 5896, 5905, 5911, 5931, 5937, 5946, 5952, 5963,
+ 5969, 5978, 5984, 6009, 6015, 6024, 6030, 6041,
+ 6047, 6056, 6062, 6082, 6088, 6097, 6103, 6114,
+ 6120, 6129, 6135, 6158, 6164, 6173, 6179, 6190,
+ 6196, 6205, 6211, 6231, 6237, 6246, 6252, 6263,
+ 6269, 6278, 6284, 6309, 6315, 6324, 6330, 6341,
+ 6347, 6356, 6362, 6382, 6388, 6397, 6403, 6414,
+ 6420, 6429, 6435, 5303, 5309, 5318, 5324, 5335,
+ 5341, 5350, 5356, 5376, 5382, 5391, 5397, 5408,
+ 5414, 5423, 5429, 5454, 5460, 5469, 5475, 5486,
+ 5492, 5501, 5507, 5527, 5533, 5542, 5548, 5559,
+ 5565, 5574, 5580, 5603, 5609, 5618, 5624, 5635,
+ 5641, 5650, 5656, 5676, 5682, 5691, 5697, 5708,
+ 5714, 5723, 5729, 5754, 5760, 5769, 5775, 5786,
+ 5792, 5801, 5807, 5827, 5833, 5842, 5848, 5859,
+ 5865, 5874, 5880, 5744, 5750, 5759, 5765, 5776,
+ 5782, 5791, 5797, 5817, 5823, 5832, 5838, 5849,
+ 5855, 5864, 5870, 5895, 5901, 5910, 5916, 5927,
+ 5933, 5942, 5948, 5968, 5974, 5983, 5989, 6000,
+ 6006, 6015, 6021, 6044, 6050, 6059, 6065, 6076,
+ 6082, 6091, 6097, 6117, 6123, 6132, 6138, 6149,
+ 6155, 6164, 6170, 6195, 6201, 6210, 6216, 6227,
+ 6233, 6242, 6248, 6268, 6274, 6283, 6289, 6300,
+ 6306, 6315, 6321, 6116, 6122, 6131, 6137, 6148,
+ 6154, 6163, 6169, 6189, 6195, 6204, 6210, 6221,
+ 6227, 6236, 6242, 6267, 6273, 6282, 6288, 6299,
+ 6305, 6314, 6320, 6340, 6346, 6355, 6361, 6372,
+ 6378, 6387, 6393, 6416, 6422, 6431, 6437, 6448,
+ 6454, 6463, 6469, 6489, 6495, 6504, 6510, 6521,
+ 6527, 6536, 6542, 6567, 6573, 6582, 6588, 6599,
+ 6605, 6614, 6620, 6640, 6646, 6655, 6661, 6672,
+ 6678, 6687, 6693, 6557, 6563, 6572, 6578, 6589,
+ 6595, 6604, 6610, 6630, 6636, 6645, 6651, 6662,
+ 6668, 6677, 6683, 6708, 6714, 6723, 6729, 6740,
+ 6746, 6755, 6761, 6781, 6787, 6796, 6802, 6813,
+ 6819, 6828, 6834, 6857, 6863, 6872, 6878, 6889,
+ 6895, 6904, 6910, 6930, 6936, 6945, 6951, 6962,
+ 6968, 6977, 6983, 7008, 7014, 7023, 7029, 7040,
+ 7046, 7055, 7061, 7081, 7087, 7096, 7102, 7113,
+ 7119, 7128, 7134, 6392, 6398, 6407, 6413, 6424,
+ 6430, 6439, 6445, 6465, 6471, 6480, 6486, 6497,
+ 6503, 6512, 6518, 6543, 6549, 6558, 6564, 6575,
+ 6581, 6590, 6596, 6616, 6622, 6631, 6637, 6648,
+ 6654, 6663, 6669, 6692, 6698, 6707, 6713, 6724,
+ 6730, 6739, 6745, 6765, 6771, 6780, 6786, 6797,
+ 6803, 6812, 6818, 6843, 6849, 6858, 6864, 6875,
+ 6881, 6890, 6896, 6916, 6922, 6931, 6937, 6948,
+ 6954, 6963, 6969, 6833, 6839, 6848, 6854, 6865,
+ 6871, 6880, 6886, 6906, 6912, 6921, 6927, 6938,
+ 6944, 6953, 6959, 6984, 6990, 6999, 7005, 7016,
+ 7022, 7031, 7037, 7057, 7063, 7072, 7078, 7089,
+ 7095, 7104, 7110, 7133, 7139, 7148, 7154, 7165,
+ 7171, 7180, 7186, 7206, 7212, 7221, 7227, 7238,
+ 7244, 7253, 7259, 7284, 7290, 7299, 7305, 7316,
+ 7322, 7331, 7337, 7357, 7363, 7372, 7378, 7389,
+ 7395, 7404, 7410, 7205, 7211, 7220, 7226, 7237,
+ 7243, 7252, 7258, 7278, 7284, 7293, 7299, 7310,
+ 7316, 7325, 7331, 7356, 7362, 7371, 7377, 7388,
+ 7394, 7403, 7409, 7429, 7435, 7444, 7450, 7461,
+ 7467, 7476, 7482, 7505, 7511, 7520, 7526, 7537,
+ 7543, 7552, 7558, 7578, 7584, 7593, 7599, 7610,
+ 7616, 7625, 7631, 7656, 7662, 7671, 7677, 7688,
+ 7694, 7703, 7709, 7729, 7735, 7744, 7750, 7761
+};
+
+static int VariableLevelCost(int level, const uint8_t probas[NUM_PROBAS]) {
+ int pattern = kLevelCodes[level - 1][0];
+ int bits = kLevelCodes[level - 1][1];
+ int cost = 0;
+ int i;
+ for (i = 2; pattern; ++i) {
+ if (pattern & 1) {
+ cost += VP8BitCost(bits & 1, probas[i]);
+ }
+ bits >>= 1;
+ pattern >>= 1;
+ }
+ return cost;
+}
+
+//-----------------------------------------------------------------------------
+// Pre-calc level costs once for all
+
+void VP8CalculateLevelCosts(VP8Proba* const proba) {
+ int ctype, band, ctx;
+ for (ctype = 0; ctype < NUM_TYPES; ++ctype) {
+ for (band = 0; band < NUM_BANDS; ++band) {
+      for (ctx = 0; ctx < NUM_CTX; ++ctx) {
+ const uint8_t* const p = proba->coeffs_[ctype][band][ctx];
+ uint16_t* const table = proba->level_cost_[ctype][band][ctx];
+ const int cost_base = VP8BitCost(1, p[1]);
+ int v;
+ table[0] = VP8BitCost(0, p[1]);
+ for (v = 1; v <= MAX_VARIABLE_LEVEL; ++v) {
+ table[v] = cost_base + VariableLevelCost(v, p);
+ }
+      // For levels 67 and up, the variable part of the cost is
+      // actually constant.
+ }
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Mode cost tables.
+
+// These are the fixed probabilities (in the coding trees) turned into bit-cost
+// by calling VP8BitCost().
+const uint16_t VP8FixedCostsUV[4] = { 302, 984, 439, 642 };
+const uint16_t VP8FixedCostsI16[4] = { 663, 919, 872, 919 };
+const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES] = {
+ { { 251, 1362, 1934, 2085, 2314, 2230, 1839, 1988, 2437, 2348 },
+ { 403, 680, 1507, 1519, 2060, 2005, 1992, 1914, 1924, 1733 },
+ { 353, 1121, 973, 1895, 2060, 1787, 1671, 1516, 2012, 1868 },
+ { 770, 852, 1581, 632, 1393, 1780, 1823, 1936, 1074, 1218 },
+ { 510, 1270, 1467, 1319, 847, 1279, 1792, 2094, 1080, 1353 },
+ { 488, 1322, 918, 1573, 1300, 883, 1814, 1752, 1756, 1502 },
+ { 425, 992, 1820, 1514, 1843, 2440, 937, 1771, 1924, 1129 },
+ { 363, 1248, 1257, 1970, 2194, 2385, 1569, 953, 1951, 1601 },
+ { 723, 1257, 1631, 964, 963, 1508, 1697, 1824, 671, 1418 },
+ { 635, 1038, 1573, 930, 1673, 1413, 1410, 1687, 1410, 749 } },
+ { { 451, 613, 1345, 1702, 1870, 1716, 1728, 1766, 2190, 2310 },
+ { 678, 453, 1171, 1443, 1925, 1831, 2045, 1781, 1887, 1602 },
+ { 711, 666, 674, 1718, 1910, 1493, 1775, 1193, 2325, 2325 },
+ { 883, 854, 1583, 542, 1800, 1878, 1664, 2149, 1207, 1087 },
+ { 669, 994, 1248, 1122, 949, 1179, 1376, 1729, 1070, 1244 },
+ { 715, 1026, 715, 1350, 1430, 930, 1717, 1296, 1479, 1479 },
+ { 544, 841, 1656, 1450, 2094, 3883, 1010, 1759, 2076, 809 },
+ { 610, 855, 957, 1553, 2067, 1561, 1704, 824, 2066, 1226 },
+ { 833, 960, 1416, 819, 1277, 1619, 1501, 1617, 757, 1182 },
+ { 711, 964, 1252, 879, 1441, 1828, 1508, 1636, 1594, 734 } },
+ { { 605, 764, 734, 1713, 1747, 1192, 1819, 1353, 1877, 2392 },
+ { 866, 641, 586, 1622, 2072, 1431, 1888, 1346, 2189, 1764 },
+ { 901, 851, 456, 2165, 2281, 1405, 1739, 1193, 2183, 2443 },
+ { 770, 1045, 952, 1078, 1342, 1191, 1436, 1063, 1303, 995 },
+ { 901, 1086, 727, 1170, 884, 1105, 1267, 1401, 1739, 1337 },
+ { 951, 1162, 595, 1488, 1388, 703, 1790, 1366, 2057, 1724 },
+ { 534, 986, 1273, 1987, 3273, 1485, 1024, 1399, 1583, 866 },
+ { 699, 1182, 695, 1978, 1726, 1986, 1326, 714, 1750, 1672 },
+ { 951, 1217, 1209, 920, 1062, 1441, 1548, 999, 952, 932 },
+ { 733, 1284, 784, 1256, 1557, 1098, 1257, 1357, 1414, 908 } },
+ { { 316, 1075, 1653, 1220, 2145, 2051, 1730, 2131, 1884, 1790 },
+ { 745, 516, 1404, 894, 1599, 2375, 2013, 2105, 1475, 1381 },
+ { 516, 729, 1088, 1319, 1637, 3426, 1636, 1275, 1531, 1453 },
+ { 894, 943, 2138, 468, 1704, 2259, 2069, 1763, 1266, 1158 },
+ { 605, 1025, 1235, 871, 1170, 1767, 1493, 1500, 1104, 1258 },
+ { 739, 826, 1207, 1151, 1412, 846, 1305, 2726, 1014, 1569 },
+ { 558, 825, 1820, 1398, 3344, 1556, 1218, 1550, 1228, 878 },
+ { 429, 951, 1089, 1816, 3861, 3861, 1556, 969, 1568, 1828 },
+ { 883, 961, 1752, 769, 1468, 1810, 2081, 2346, 613, 1298 },
+ { 803, 895, 1372, 641, 1303, 1708, 1686, 1700, 1306, 1033 } },
+ { { 439, 1267, 1270, 1579, 963, 1193, 1723, 1729, 1198, 1993 },
+ { 705, 725, 1029, 1153, 1176, 1103, 1821, 1567, 1259, 1574 },
+ { 723, 859, 802, 1253, 972, 1202, 1407, 1665, 1520, 1674 },
+ { 894, 960, 1254, 887, 1052, 1607, 1344, 1349, 865, 1150 },
+ { 833, 1312, 1337, 1205, 572, 1288, 1414, 1529, 1088, 1430 },
+ { 842, 1279, 1068, 1861, 862, 688, 1861, 1630, 1039, 1381 },
+ { 766, 938, 1279, 1546, 3338, 1550, 1031, 1542, 1288, 640 },
+ { 715, 1090, 835, 1609, 1100, 1100, 1603, 1019, 1102, 1617 },
+ { 894, 1813, 1500, 1188, 789, 1194, 1491, 1919, 617, 1333 },
+ { 610, 1076, 1644, 1281, 1283, 975, 1179, 1688, 1434, 889 } },
+ { { 544, 971, 1146, 1849, 1221, 740, 1857, 1621, 1683, 2430 },
+ { 723, 705, 961, 1371, 1426, 821, 2081, 2079, 1839, 1380 },
+ { 783, 857, 703, 2145, 1419, 814, 1791, 1310, 1609, 2206 },
+ { 997, 1000, 1153, 792, 1229, 1162, 1810, 1418, 942, 979 },
+ { 901, 1226, 883, 1289, 793, 715, 1904, 1649, 1319, 3108 },
+ { 979, 1478, 782, 2216, 1454, 455, 3092, 1591, 1997, 1664 },
+ { 663, 1110, 1504, 1114, 1522, 3311, 676, 1522, 1530, 1024 },
+ { 605, 1138, 1153, 1314, 1569, 1315, 1157, 804, 1574, 1320 },
+ { 770, 1216, 1218, 1227, 869, 1384, 1232, 1375, 834, 1239 },
+ { 775, 1007, 843, 1216, 1225, 1074, 2527, 1479, 1149, 975 } },
+ { { 477, 817, 1309, 1439, 1708, 1454, 1159, 1241, 1945, 1672 },
+ { 577, 796, 1112, 1271, 1618, 1458, 1087, 1345, 1831, 1265 },
+ { 663, 776, 753, 1940, 1690, 1690, 1227, 1097, 3149, 1361 },
+ { 766, 1299, 1744, 1161, 1565, 1106, 1045, 1230, 1232, 707 },
+ { 915, 1026, 1404, 1182, 1184, 851, 1428, 2425, 1043, 789 },
+ { 883, 1456, 790, 1082, 1086, 985, 1083, 1484, 1238, 1160 },
+ { 507, 1345, 2261, 1995, 1847, 3636, 653, 1761, 2287, 933 },
+ { 553, 1193, 1470, 2057, 2059, 2059, 833, 779, 2058, 1263 },
+ { 766, 1275, 1515, 1039, 957, 1554, 1286, 1540, 1289, 705 },
+ { 499, 1378, 1496, 1385, 1850, 1850, 1044, 2465, 1515, 720 } },
+ { { 553, 930, 978, 2077, 1968, 1481, 1457, 761, 1957, 2362 },
+ { 694, 864, 905, 1720, 1670, 1621, 1429, 718, 2125, 1477 },
+ { 699, 968, 658, 3190, 2024, 1479, 1865, 750, 2060, 2320 },
+ { 733, 1308, 1296, 1062, 1576, 1322, 1062, 1112, 1172, 816 },
+ { 920, 927, 1052, 939, 947, 1156, 1152, 1073, 3056, 1268 },
+ { 723, 1534, 711, 1547, 1294, 892, 1553, 928, 1815, 1561 },
+ { 663, 1366, 1583, 2111, 1712, 3501, 522, 1155, 2130, 1133 },
+ { 614, 1731, 1188, 2343, 1944, 3733, 1287, 487, 3546, 1758 },
+ { 770, 1585, 1312, 826, 884, 2673, 1185, 1006, 1195, 1195 },
+ { 758, 1333, 1273, 1023, 1621, 1162, 1351, 833, 1479, 862 } },
+ { { 376, 1193, 1446, 1149, 1545, 1577, 1870, 1789, 1175, 1823 },
+ { 803, 633, 1136, 1058, 1350, 1323, 1598, 2247, 1072, 1252 },
+ { 614, 1048, 943, 981, 1152, 1869, 1461, 1020, 1618, 1618 },
+ { 1107, 1085, 1282, 592, 1779, 1933, 1648, 2403, 691, 1246 },
+ { 851, 1309, 1223, 1243, 895, 1593, 1792, 2317, 627, 1076 },
+ { 770, 1216, 1030, 1125, 921, 981, 1629, 1131, 1049, 1646 },
+ { 626, 1469, 1456, 1081, 1489, 3278, 981, 1232, 1498, 733 },
+ { 617, 1201, 812, 1220, 1476, 1476, 1478, 970, 1228, 1488 },
+ { 1179, 1393, 1540, 999, 1243, 1503, 1916, 1925, 414, 1614 },
+ { 943, 1088, 1490, 682, 1112, 1372, 1756, 1505, 966, 966 } },
+ { { 322, 1142, 1589, 1396, 2144, 1859, 1359, 1925, 2084, 1518 },
+ { 617, 625, 1241, 1234, 2121, 1615, 1524, 1858, 1720, 1004 },
+ { 553, 851, 786, 1299, 1452, 1560, 1372, 1561, 1967, 1713 },
+ { 770, 977, 1396, 568, 1893, 1639, 1540, 2108, 1430, 1013 },
+ { 684, 1120, 1375, 982, 930, 2719, 1638, 1643, 933, 993 },
+ { 553, 1103, 996, 1356, 1361, 1005, 1507, 1761, 1184, 1268 },
+ { 419, 1247, 1537, 1554, 1817, 3606, 1026, 1666, 1829, 923 },
+ { 439, 1139, 1101, 1257, 3710, 1922, 1205, 1040, 1931, 1529 },
+ { 979, 935, 1269, 847, 1202, 1286, 1530, 1535, 827, 1036 },
+ { 516, 1378, 1569, 1110, 1798, 1798, 1198, 2199, 1543, 712 } },
+};
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
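
An illustrative sketch, not part of the patch: the tables above split a coefficient level's bit-cost into a fixed part, indexed directly by level in VP8LevelFixedCosts[], and a context-dependent part stored in level_cost_[] (capped at MAX_VARIABLE_LEVEL) that VP8CalculateLevelCosts() fills in. A caller combines the two roughly as follows; the helper name is hypothetical and simply mirrors VP8LevelCost() declared in cost.h below.

    /* Sketch only: total cost of coding 'level' in a given (type, band, ctx)
     * context, using the tables built above. */
    static int LookupLevelCost(const VP8Proba* const proba,
                               int ctype, int band, int ctx, int level) {
      const uint16_t* const table = proba->level_cost_[ctype][band][ctx];
      const int capped = (level > MAX_VARIABLE_LEVEL) ? MAX_VARIABLE_LEVEL : level;
      return VP8LevelFixedCosts[level] + table[capped];
    }
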
diff --git a/src/enc/cost.h b/src/enc/cost.h
new file mode 100644
index 00000000..b80bb106
--- /dev/null
+++ b/src/enc/cost.h
@@ -0,0 +1,52 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Cost tables for level and modes.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_ENC_COST_H_
+#define WEBP_ENC_COST_H_
+
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+extern const uint16_t VP8LevelFixedCosts[2048]; // approximate cost per level
+extern const uint16_t VP8EntropyCost[256]; // 8bit fixed-point log(p)
+
+// Cost of coding one event with probability 'proba'.
+static inline int VP8BitCost(int bit, uint8_t proba) {
+ return !bit ? VP8EntropyCost[proba] : VP8EntropyCost[255 - proba];
+}
+
+// Cost of coding 'nb' 1's and 'total-nb' 0's using 'proba' probability.
+static inline uint64_t VP8BranchCost(uint64_t nb, uint64_t total, uint8_t proba) {
+ return nb * VP8BitCost(1, proba) + (total - nb) * VP8BitCost(0, proba);
+}
+
+// Level cost calculations
+void VP8CalculateLevelCosts(VP8Proba* const proba);
+static inline int VP8LevelCost(const uint16_t* const table, int level) {
+ return VP8LevelFixedCosts[level]
+ + table[level > MAX_VARIABLE_LEVEL ? MAX_VARIABLE_LEVEL : level];
+}
+
+// Mode costs
+extern const uint16_t VP8FixedCostsUV[4];
+extern const uint16_t VP8FixedCostsI16[4];
+extern const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES];
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // WEBP_ENC_COST_H_
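
An illustrative sketch, not from the patch: per the comment above, VP8EntropyCost[] is an 8-bit fixed-point -log2(p) table, so VP8BitCost() returns costs in 1/256-bit units. Under that assumption, a rough regeneration of such a table could look like the following; the exact rounding and endpoint handling of the real table may differ.

    #include <math.h>

    /* Approximation only: cost, in 1/256-bit units, of a symbol whose
     * probability is proba/256 (proba in 1..255). */
    static int ApproxEntropyCost(int proba) {
      const double p = proba / 256.0;
      return (int)(-log(p) / log(2.0) * 256.0 + 0.5);
    }
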
diff --git a/src/enc/dsp.c b/src/enc/dsp.c
new file mode 100644
index 00000000..45f977c9
--- /dev/null
+++ b/src/enc/dsp.c
@@ -0,0 +1,647 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// speed-critical functions.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// run-time tables (~4k)
+
+static uint8_t clip1[255 + 510 + 1]; // clips [-255,510] to [0,255]
+
+// We declare this variable 'volatile' to prevent instruction reordering
+// and make sure it's set to true _last_ (so as to be thread-safe)
+static volatile int tables_ok = 0;
+
+static void InitTables(void) {
+ if (!tables_ok) {
+ int i;
+ for (i = -255; i <= 255 + 255; ++i) {
+ clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i;
+ }
+ tables_ok = 1;
+ }
+}
+
+static inline uint8_t clip_8b(int v) {
+ return (!(v & ~0xff)) ? v : v < 0 ? 0 : 255;
+}
+
+//-----------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+#define STORE(x, y, v) \
+ dst[(x) + (y) * BPS] = clip_8b(ref[(x) + (y) * BPS] + ((v) >> 3))
+
+static const int kC1 = 20091 + (1 << 16);
+static const int kC2 = 35468;
+#define MUL(a, b) (((a) * (b)) >> 16)
+
+static void ITransform(const uint8_t* ref, const int16_t* in, uint8_t* dst) {
+ int C[4 * 4], *tmp;
+ int i;
+ tmp = C;
+ for (i = 0; i < 4; ++i) { // vertical pass
+ const int a = in[0] + in[8];
+ const int b = in[0] - in[8];
+ const int c = MUL(in[4], kC2) - MUL(in[12], kC1);
+ const int d = MUL(in[4], kC1) + MUL(in[12], kC2);
+ tmp[0] = a + d;
+ tmp[1] = b + c;
+ tmp[2] = b - c;
+ tmp[3] = a - d;
+ tmp += 4;
+ in++;
+ }
+
+ tmp = C;
+ for (i = 0; i < 4; ++i) { // horizontal pass
+ const int dc = tmp[0] + 4;
+ const int a = dc + tmp[8];
+ const int b = dc - tmp[8];
+ const int c = MUL(tmp[4], kC2) - MUL(tmp[12], kC1);
+ const int d = MUL(tmp[4], kC1) + MUL(tmp[12], kC2);
+ STORE(0, i, a + d);
+ STORE(1, i, b + c);
+ STORE(2, i, b - c);
+ STORE(3, i, a - d);
+ tmp++;
+ }
+}
+
+static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) {
+ int i;
+ int tmp[16];
+ for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {
+ const int d0 = src[0] - ref[0];
+ const int d1 = src[1] - ref[1];
+ const int d2 = src[2] - ref[2];
+ const int d3 = src[3] - ref[3];
+ const int a0 = (d0 + d3) << 3;
+ const int a1 = (d1 + d2) << 3;
+ const int a2 = (d1 - d2) << 3;
+ const int a3 = (d0 - d3) << 3;
+ tmp[0 + i * 4] = (a0 + a1);
+ tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 14500) >> 12;
+ tmp[2 + i * 4] = (a0 - a1);
+ tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 7500) >> 12;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int a0 = (tmp[0 + i] + tmp[12 + i]);
+ const int a1 = (tmp[4 + i] + tmp[ 8 + i]);
+ const int a2 = (tmp[4 + i] - tmp[ 8 + i]);
+ const int a3 = (tmp[0 + i] - tmp[12 + i]);
+ out[0 + i] = (a0 + a1 + 7) >> 4;
+ out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
+ out[8 + i] = (a0 - a1 + 7) >> 4;
+ out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);
+ }
+}
+
+static void ITransformWHT(const int16_t* in, int16_t* out) {
+ int tmp[16];
+ int i;
+ for (i = 0; i < 4; ++i) {
+ const int a0 = in[0 + i] + in[12 + i];
+ const int a1 = in[4 + i] + in[ 8 + i];
+ const int a2 = in[4 + i] - in[ 8 + i];
+ const int a3 = in[0 + i] - in[12 + i];
+ tmp[0 + i] = a0 + a1;
+ tmp[8 + i] = a0 - a1;
+ tmp[4 + i] = a3 + a2;
+ tmp[12 + i] = a3 - a2;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int dc = tmp[0 + i * 4] + 3; // w/ rounder
+ const int a0 = dc + tmp[3 + i * 4];
+ const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4];
+ const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4];
+ const int a3 = dc - tmp[3 + i * 4];
+ out[ 0] = (a0 + a1) >> 3;
+ out[16] = (a3 + a2) >> 3;
+ out[32] = (a0 - a1) >> 3;
+ out[48] = (a3 - a2) >> 3;
+ out += 64;
+ }
+}
+
+static void FTransformWHT(const int16_t* in, int16_t* out) {
+ int tmp[16];
+ int i;
+ for (i = 0; i < 4; ++i, in += 64) {
+ const int a0 = (in[0 * 16] + in[2 * 16]) << 2;
+ const int a1 = (in[1 * 16] + in[3 * 16]) << 2;
+ const int a2 = (in[1 * 16] - in[3 * 16]) << 2;
+ const int a3 = (in[0 * 16] - in[2 * 16]) << 2;
+ tmp[0 + i * 4] = (a0 + a1) + (a0 != 0);
+ tmp[1 + i * 4] = a3 + a2;
+ tmp[2 + i * 4] = a3 - a2;
+ tmp[3 + i * 4] = a0 - a1;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int a0 = (tmp[0 + i] + tmp[8 + i]);
+ const int a1 = (tmp[4 + i] + tmp[12+ i]);
+ const int a2 = (tmp[4 + i] - tmp[12+ i]);
+ const int a3 = (tmp[0 + i] - tmp[8 + i]);
+ const int b0 = a0 + a1;
+ const int b1 = a3 + a2;
+ const int b2 = a3 - a2;
+ const int b3 = a0 - a1;
+ out[ 0 + i] = (b0 + (b0 > 0) + 3) >> 3;
+ out[ 4 + i] = (b1 + (b1 > 0) + 3) >> 3;
+ out[ 8 + i] = (b2 + (b2 > 0) + 3) >> 3;
+ out[12 + i] = (b3 + (b3 > 0) + 3) >> 3;
+ }
+}
+
+// default C implementations:
+VP8Idct VP8ITransform = ITransform;
+VP8Fdct VP8FTransform = FTransform;
+VP8WHT VP8ITransformWHT = ITransformWHT;
+VP8WHT VP8FTransformWHT = FTransformWHT;
+
+#undef MUL
+#undef STORE
+
+//-----------------------------------------------------------------------------
+// Intra predictions
+
+#define OUT(x, y) dst[(x) + (y) * BPS]
+
+static inline void Fill(uint8_t* dst, int value, int size) {
+ int j;
+ for (j = 0; j < size; ++j) {
+ memset(dst + j * BPS, value, size);
+ }
+}
+
+static inline void VerticalPred(uint8_t* dst, const uint8_t* top, int size) {
+ int j;
+ if (top) {
+ for (j = 0; j < size; ++j) memcpy(dst + j * BPS, top, size);
+ } else {
+ Fill(dst, 127, size);
+ }
+}
+
+static inline void HorizontalPred(uint8_t* dst, const uint8_t* left, int size) {
+ if (left) {
+ int j;
+ for (j = 0; j < size; ++j) {
+ memset(dst + j * BPS, left[j], size);
+ }
+ } else {
+ Fill(dst, 129, size);
+ }
+}
+
+static inline void TrueMotion(uint8_t* dst, const uint8_t* left,
+ const uint8_t* top, int size) {
+ int y;
+ if (left) {
+ if (top) {
+ const uint8_t* const clip = clip1 + 255 - left[-1];
+ for (y = 0; y < size; ++y) {
+ const uint8_t* const clip_table = clip + left[y];
+ int x;
+ for (x = 0; x < size; ++x) {
+ dst[x] = clip_table[top[x]];
+ }
+ dst += BPS;
+ }
+ } else {
+ HorizontalPred(dst, left, size);
+ }
+ } else {
+ // true motion without left samples (hence: with default 129 value)
+ // is equivalent to VE prediction where you just copy the top samples.
+ // Note that if top samples are not available, the default value is
+ // then 129, and not 127 as in the VerticalPred case.
+ if (top) {
+ VerticalPred(dst, top, size);
+ } else {
+ Fill(dst, 129, size);
+ }
+ }
+}
+
+static inline void DCMode(uint8_t* dst, const uint8_t* left,
+ const uint8_t* top,
+ int size, int round, int shift) {
+ int DC = 0;
+ int j;
+ if (top) {
+ for (j = 0; j < size; ++j) DC += top[j];
+ if (left) { // top and left present
+ for (j = 0; j < size; ++j) DC += left[j];
+ } else { // top, but no left
+ DC += DC;
+ }
+ DC = (DC + round) >> shift;
+ } else if (left) { // left but no top
+ for (j = 0; j < size; ++j) DC += left[j];
+ DC += DC;
+ DC = (DC + round) >> shift;
+ } else { // no top, no left, nothing.
+ DC = 0x80;
+ }
+ Fill(dst, DC, size);
+}
+
+//-----------------------------------------------------------------------------
+// Chroma 8x8 prediction (paragraph 12.2)
+
+static void IntraChromaPreds(uint8_t* dst, const uint8_t* left,
+ const uint8_t* top) {
+ // U block
+ DCMode(C8DC8 + dst, left, top, 8, 8, 4);
+ VerticalPred(C8VE8 + dst, top, 8);
+ HorizontalPred(C8HE8 + dst, left, 8);
+ TrueMotion(C8TM8 + dst, left, top, 8);
+ // V block
+ dst += 8;
+ if (top) top += 8;
+ if (left) left += 16;
+ DCMode(C8DC8 + dst, left, top, 8, 8, 4);
+ VerticalPred(C8VE8 + dst, top, 8);
+ HorizontalPred(C8HE8 + dst, left, 8);
+ TrueMotion(C8TM8 + dst, left, top, 8);
+}
+
+//-----------------------------------------------------------------------------
+// luma 16x16 prediction (paragraph 12.3)
+
+static void Intra16Preds(uint8_t* dst,
+ const uint8_t* left, const uint8_t* top) {
+ DCMode(I16DC16 + dst, left, top, 16, 16, 5);
+ VerticalPred(I16VE16 + dst, top, 16);
+ HorizontalPred(I16HE16 + dst, left, 16);
+ TrueMotion(I16TM16 + dst, left, top, 16);
+}
+
+//-----------------------------------------------------------------------------
+// luma 4x4 prediction
+
+#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
+#define AVG2(a, b) (((a) + (b) + 1) >> 1)
+
+static void VE4(uint8_t* dst, const uint8_t* top) { // vertical
+ const uint8_t vals[4] = {
+ AVG3(top[-1], top[0], top[1]),
+ AVG3(top[ 0], top[1], top[2]),
+ AVG3(top[ 1], top[2], top[3]),
+ AVG3(top[ 2], top[3], top[4])
+ };
+ int i;
+ for (i = 0; i < 4; ++i) {
+ memcpy(dst + i * BPS, vals, 4);
+ }
+}
+
+static void HE4(uint8_t* dst, const uint8_t* top) { // horizontal
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ *(uint32_t*)(dst + 0 * BPS) = 0x01010101U * AVG3(X, I, J);
+ *(uint32_t*)(dst + 1 * BPS) = 0x01010101U * AVG3(I, J, K);
+ *(uint32_t*)(dst + 2 * BPS) = 0x01010101U * AVG3(J, K, L);
+ *(uint32_t*)(dst + 3 * BPS) = 0x01010101U * AVG3(K, L, L);
+}
+
+static void DC4(uint8_t* dst, const uint8_t* top) {
+ uint32_t dc = 4;
+ int i;
+ for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i];
+ Fill(dst, dc >> 3, 4);
+}
+
+static void RD4(uint8_t* dst, const uint8_t* top) {
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ OUT(0, 3) = AVG3(J, K, L);
+ OUT(0, 2) = OUT(1, 3) = AVG3(I, J, K);
+ OUT(0, 1) = OUT(1, 2) = OUT(2, 3) = AVG3(X, I, J);
+ OUT(0, 0) = OUT(1, 1) = OUT(2, 2) = OUT(3, 3) = AVG3(A, X, I);
+ OUT(1, 0) = OUT(2, 1) = OUT(3, 2) = AVG3(B, A, X);
+ OUT(2, 0) = OUT(3, 1) = AVG3(C, B, A);
+ OUT(3, 0) = AVG3(D, C, B);
+}
+
+static void LD4(uint8_t* dst, const uint8_t* top) {
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ const int E = top[4];
+ const int F = top[5];
+ const int G = top[6];
+ const int H = top[7];
+ OUT(0, 0) = AVG3(A, B, C);
+ OUT(1, 0) = OUT(0, 1) = AVG3(B, C, D);
+ OUT(2, 0) = OUT(1, 1) = OUT(0, 2) = AVG3(C, D, E);
+ OUT(3, 0) = OUT(2, 1) = OUT(1, 2) = OUT(0, 3) = AVG3(D, E, F);
+ OUT(3, 1) = OUT(2, 2) = OUT(1, 3) = AVG3(E, F, G);
+ OUT(3, 2) = OUT(2, 3) = AVG3(F, G, H);
+ OUT(3, 3) = AVG3(G, H, H);
+}
+
+static void VR4(uint8_t* dst, const uint8_t* top) {
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ OUT(0, 0) = OUT(1, 2) = AVG2(X, A);
+ OUT(1, 0) = OUT(2, 2) = AVG2(A, B);
+ OUT(2, 0) = OUT(3, 2) = AVG2(B, C);
+ OUT(3, 0) = AVG2(C, D);
+
+ OUT(0, 3) = AVG3(K, J, I);
+ OUT(0, 2) = AVG3(J, I, X);
+ OUT(0, 1) = OUT(1, 3) = AVG3(I, X, A);
+ OUT(1, 1) = OUT(2, 3) = AVG3(X, A, B);
+ OUT(2, 1) = OUT(3, 3) = AVG3(A, B, C);
+ OUT(3, 1) = AVG3(B, C, D);
+}
+
+static void VL4(uint8_t* dst, const uint8_t* top) {
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+ const int D = top[3];
+ const int E = top[4];
+ const int F = top[5];
+ const int G = top[6];
+ const int H = top[7];
+ OUT(0, 0) = AVG2(A, B);
+ OUT(1, 0) = OUT(0, 2) = AVG2(B, C);
+ OUT(2, 0) = OUT(1, 2) = AVG2(C, D);
+ OUT(3, 0) = OUT(2, 2) = AVG2(D, E);
+
+ OUT(0, 1) = AVG3(A, B, C);
+ OUT(1, 1) = OUT(0, 3) = AVG3(B, C, D);
+ OUT(2, 1) = OUT(1, 3) = AVG3(C, D, E);
+ OUT(3, 1) = OUT(2, 3) = AVG3(D, E, F);
+ OUT(3, 2) = AVG3(E, F, G);
+ OUT(3, 3) = AVG3(F, G, H);
+}
+
+static void HU4(uint8_t* dst, const uint8_t* top) {
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ OUT(0, 0) = AVG2(I, J);
+ OUT(2, 0) = OUT(0, 1) = AVG2(J, K);
+ OUT(2, 1) = OUT(0, 2) = AVG2(K, L);
+ OUT(1, 0) = AVG3(I, J, K);
+ OUT(3, 0) = OUT(1, 1) = AVG3(J, K, L);
+ OUT(3, 1) = OUT(1, 2) = AVG3(K, L, L);
+ OUT(3, 2) = OUT(2, 2) =
+ OUT(0, 3) = OUT(1, 3) = OUT(2, 3) = OUT(3, 3) = L;
+}
+
+static void HD4(uint8_t* dst, const uint8_t* top) {
+ const int X = top[-1];
+ const int I = top[-2];
+ const int J = top[-3];
+ const int K = top[-4];
+ const int L = top[-5];
+ const int A = top[0];
+ const int B = top[1];
+ const int C = top[2];
+
+ OUT(0, 0) = OUT(2, 1) = AVG2(I, X);
+ OUT(0, 1) = OUT(2, 2) = AVG2(J, I);
+ OUT(0, 2) = OUT(2, 3) = AVG2(K, J);
+ OUT(0, 3) = AVG2(L, K);
+
+ OUT(3, 0) = AVG3(A, B, C);
+ OUT(2, 0) = AVG3(X, A, B);
+ OUT(1, 0) = OUT(3, 1) = AVG3(I, X, A);
+ OUT(1, 1) = OUT(3, 2) = AVG3(J, I, X);
+ OUT(1, 2) = OUT(3, 3) = AVG3(K, J, I);
+ OUT(1, 3) = AVG3(L, K, J);
+}
+
+static void TM4(uint8_t* dst, const uint8_t* top) {
+ int x, y;
+ const uint8_t* const clip = clip1 + 255 - top[-1];
+ for (y = 0; y < 4; ++y) {
+ const uint8_t* const clip_table = clip + top[-2 - y];
+ for (x = 0; x < 4; ++x) {
+ dst[x] = clip_table[top[x]];
+ }
+ dst += BPS;
+ }
+}
+
+#undef AVG3
+#undef AVG2
+
+// Left samples are top[-5..-2], top_left is top[-1], the top samples are
+// located at top[0..3], and the top-right samples are at top[4..7].
+static void Intra4Preds(uint8_t* dst, const uint8_t* top) {
+ DC4(I4DC4 + dst, top);
+ TM4(I4TM4 + dst, top);
+ VE4(I4VE4 + dst, top);
+ HE4(I4HE4 + dst, top);
+ RD4(I4RD4 + dst, top);
+ VR4(I4VR4 + dst, top);
+ LD4(I4LD4 + dst, top);
+ VL4(I4VL4 + dst, top);
+ HD4(I4HD4 + dst, top);
+ HU4(I4HU4 + dst, top);
+}
+
+// default C implementations
+VP8Intra4Preds VP8EncPredLuma4 = Intra4Preds;
+VP8IntraPreds VP8EncPredLuma16 = Intra16Preds;
+VP8IntraPreds VP8EncPredChroma8 = IntraChromaPreds;
+
+//-----------------------------------------------------------------------------
+// Metric
+
+static inline int GetSSE(const uint8_t* a, const uint8_t* b, int w, int h) {
+ int count = 0;
+ int y, x;
+ for (y = 0; y < h; ++y) {
+ for (x = 0; x < w; ++x) {
+ const int diff = (int)a[x] - b[x];
+ count += diff * diff;
+ }
+ a += BPS;
+ b += BPS;
+ }
+ return count;
+}
+
+static int SSE16x16(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 16, 16);
+}
+static int SSE16x8(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 16, 8);
+}
+static int SSE8x8(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 8, 8);
+}
+static int SSE4x4(const uint8_t* a, const uint8_t* b) {
+ return GetSSE(a, b, 4, 4);
+}
+
+// default C implementations
+VP8Metric VP8SSE16x16 = SSE16x16;
+VP8Metric VP8SSE8x8 = SSE8x8;
+VP8Metric VP8SSE16x8 = SSE16x8;
+VP8Metric VP8SSE4x4 = SSE4x4;
+
+//-----------------------------------------------------------------------------
+// Texture distortion
+//
+// We try to match the spectral content (weighted) between source and
+// reconstructed samples.
+
+// Hadamard transform
+static void TTransform(const uint8_t* in, int16_t* out) {
+ int tmp[16];
+ int i;
+ for (i = 0; i < 4; ++i, in += BPS) {
+ const int a0 = (in[0] + in[2]) << 2;
+ const int a1 = (in[1] + in[3]) << 2;
+ const int a2 = (in[1] - in[3]) << 2;
+ const int a3 = (in[0] - in[2]) << 2;
+ tmp[0 + i * 4] = a0 + a1 + (a0 != 0);
+ tmp[1 + i * 4] = a3 + a2;
+ tmp[2 + i * 4] = a3 - a2;
+ tmp[3 + i * 4] = a0 - a1;
+ }
+ for (i = 0; i < 4; ++i) {
+ const int a0 = (tmp[0 + i] + tmp[8 + i]);
+ const int a1 = (tmp[4 + i] + tmp[12+ i]);
+ const int a2 = (tmp[4 + i] - tmp[12+ i]);
+ const int a3 = (tmp[0 + i] - tmp[8 + i]);
+ const int b0 = a0 + a1;
+ const int b1 = a3 + a2;
+ const int b2 = a3 - a2;
+ const int b3 = a0 - a1;
+ out[ 0 + i] = (b0 + (b0 < 0) + 3) >> 3;
+ out[ 4 + i] = (b1 + (b1 < 0) + 3) >> 3;
+ out[ 8 + i] = (b2 + (b2 < 0) + 3) >> 3;
+ out[12 + i] = (b3 + (b3 < 0) + 3) >> 3;
+ }
+}
+
+static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ int16_t tmp1[16], tmp2[16];
+ int k;
+ int D;
+ TTransform(a, tmp1);
+ TTransform(b, tmp2);
+ D = 0;
+ for (k = 0; k < 16; ++k)
+ D += w[k] * (abs(tmp2[k]) - abs(tmp1[k]));
+ return (abs(D) + 8) >> 4;
+}
+
+static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
+ const uint16_t* const w) {
+ int D = 0;
+ int x, y;
+ for (y = 0; y < 16 * BPS; y += 4 * BPS) {
+ for (x = 0; x < 16; x += 4) {
+ D += Disto4x4(a + x + y, b + x + y, w);
+ }
+ }
+ return D;
+}
+
+VP8WMetric VP8TDisto4x4 = Disto4x4;
+VP8WMetric VP8TDisto16x16 = Disto16x16;
+
+//-----------------------------------------------------------------------------
+// Quantization
+//
+
+// Simple quantization
+static int QuantizeBlock(int16_t in[16], int16_t out[16],
+ int n, const VP8Matrix* const mtx) {
+ int last = -1;
+ for (; n < 16; ++n) {
+ const int j = VP8Zigzag[n];
+ const int sign = (in[j] < 0);
+ int coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
+ if (coeff > 2047) coeff = 2047;
+ if (coeff > mtx->zthresh_[j]) {
+ const int Q = mtx->q_[j];
+ const int iQ = mtx->iq_[j];
+ const int B = mtx->bias_[j];
+ out[n] = QUANTDIV(coeff, iQ, B);
+ if (sign) out[n] = -out[n];
+ in[j] = out[n] * Q;
+ if (out[n]) last = n;
+ } else {
+ out[n] = 0;
+ in[j] = 0;
+ }
+ }
+ return (last >= 0);
+}
+
+// default C implementation
+VP8QuantizeBlock VP8EncQuantizeBlock = QuantizeBlock;
+
+//-----------------------------------------------------------------------------
+// Block copy
+
+static inline void Copy(const uint8_t* src, uint8_t* dst, int size) {
+ int y;
+ for (y = 0; y < size; ++y) {
+ memcpy(dst, src, size);
+ src += BPS;
+ dst += BPS;
+ }
+}
+
+static void Copy4x4(const uint8_t* src, uint8_t* dst) { Copy(src, dst, 4); }
+static void Copy8x8(const uint8_t* src, uint8_t* dst) { Copy(src, dst, 8); }
+static void Copy16x16(const uint8_t* src, uint8_t* dst) { Copy(src, dst, 16); }
+
+// default C implementations
+VP8BlockCopy VP8Copy4x4 = Copy4x4;
+VP8BlockCopy VP8Copy8x8 = Copy8x8;
+VP8BlockCopy VP8Copy16x16 = Copy16x16;
+
+//-----------------------------------------------------------------------------
+
+void VP8EncDspInit(void) {
+ InitTables();
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
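
An illustrative sketch, not part of the patch: dsp.c binds plain-C implementations to global function pointers, presumably so a platform-specific initializer can later substitute optimized versions. Under that assumption, a forward/inverse transform round-trip through the dispatch pointers looks like this; the function name is hypothetical and all buffers are BPS-strided, as in the patch.

    /* Sketch only: transform a 4x4 block through the dispatch pointers. */
    static void TransformRoundTripSketch(const uint8_t* src, const uint8_t* ref,
                                         uint8_t* dst) {
      int16_t coeffs[16];
      VP8EncDspInit();                  /* builds the clip1[] table once */
      VP8FTransform(src, ref, coeffs);  /* forward transform of src - ref */
      /* (quantization via VP8EncQuantizeBlock would normally go here) */
      VP8ITransform(ref, coeffs, dst);  /* reconstruct into dst */
    }
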
diff --git a/src/enc/filter.c b/src/enc/filter.c
new file mode 100644
index 00000000..a0a42b07
--- /dev/null
+++ b/src/enc/filter.c
@@ -0,0 +1,378 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Selecting filter level
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include <math.h>
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+// NOTE: clip1, the tables below and InitTables() duplicate entries from dsp.c.
+static uint8_t abs0[255 + 255 + 1]; // abs(i)
+static uint8_t abs1[255 + 255 + 1]; // abs(i)>>1
+static int8_t sclip1[1020 + 1020 + 1]; // clips [-1020, 1020] to [-128, 127]
+static int8_t sclip2[112 + 112 + 1]; // clips [-112, 112] to [-16, 15]
+static uint8_t clip1[255 + 510 + 1]; // clips [-255,510] to [0,255]
+
+static int tables_ok = 0;
+
+static void InitTables(void) {
+ if (!tables_ok) {
+ int i;
+ for (i = -255; i <= 255; ++i) {
+ abs0[255 + i] = (i < 0) ? -i : i;
+ abs1[255 + i] = abs0[255 + i] >> 1;
+ }
+ for (i = -1020; i <= 1020; ++i) {
+ sclip1[1020 + i] = (i < -128) ? -128 : (i > 127) ? 127 : i;
+ }
+ for (i = -112; i <= 112; ++i) {
+ sclip2[112 + i] = (i < -16) ? -16 : (i > 15) ? 15 : i;
+ }
+ for (i = -255; i <= 255 + 255; ++i) {
+ clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i;
+ }
+ tables_ok = 1;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Edge filtering functions
+
+// 4 pixels in, 2 pixels out
+static inline void do_filter2(uint8_t* p, int step) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ const int a = 3 * (q0 - p0) + sclip1[1020 + p1 - q1];
+ const int a1 = sclip2[112 + ((a + 4) >> 3)];
+ const int a2 = sclip2[112 + ((a + 3) >> 3)];
+ p[-step] = clip1[255 + p0 + a2];
+ p[ 0] = clip1[255 + q0 - a1];
+}
+
+// 4 pixels in, 4 pixels out
+static inline void do_filter4(uint8_t* p, int step) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ const int a = 3 * (q0 - p0);
+ const int a1 = sclip2[112 + ((a + 4) >> 3)];
+ const int a2 = sclip2[112 + ((a + 3) >> 3)];
+ const int a3 = (a1 + 1) >> 1;
+ p[-2*step] = clip1[255 + p1 + a3];
+ p[- step] = clip1[255 + p0 + a2];
+ p[ 0] = clip1[255 + q0 - a1];
+ p[ step] = clip1[255 + q1 - a3];
+}
+
+// high edge-variance
+static inline int hev(const uint8_t* p, int step, int thresh) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ return (abs0[255 + p1 - p0] > thresh) || (abs0[255 + q1 - q0] > thresh);
+}
+
+static inline int needs_filter(const uint8_t* p, int step, int thresh) {
+ const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step];
+ return (2 * abs0[255 + p0 - q0] + abs1[255 + p1 - q1]) <= thresh;
+}
+
+static inline int needs_filter2(const uint8_t* p, int step, int t, int it) {
+ const int p3 = p[-4*step], p2 = p[-3*step], p1 = p[-2*step], p0 = p[-step];
+ const int q0 = p[0], q1 = p[step], q2 = p[2*step], q3 = p[3*step];
+ if ((2 * abs0[255 + p0 - q0] + abs1[255 + p1 - q1]) > t)
+ return 0;
+ return abs0[255 + p3 - p2] <= it && abs0[255 + p2 - p1] <= it &&
+ abs0[255 + p1 - p0] <= it && abs0[255 + q3 - q2] <= it &&
+ abs0[255 + q2 - q1] <= it && abs0[255 + q1 - q0] <= it;
+}
+
+//-----------------------------------------------------------------------------
+// Simple In-loop filtering (Paragraph 15.2)
+
+static void SimpleVFilter16(uint8_t* p, int stride, int thresh) {
+ int i;
+ for (i = 0; i < 16; ++i) {
+ if (needs_filter(p + i, stride, thresh)) {
+ do_filter2(p + i, stride);
+ }
+ }
+}
+
+static void SimpleHFilter16(uint8_t* p, int stride, int thresh) {
+ int i;
+ for (i = 0; i < 16; ++i) {
+ if (needs_filter(p + i * stride, 1, thresh)) {
+ do_filter2(p + i * stride, 1);
+ }
+ }
+}
+
+static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4 * stride;
+ SimpleVFilter16(p, stride, thresh);
+ }
+}
+
+static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4;
+ SimpleHFilter16(p, stride, thresh);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Complex In-loop filtering (Paragraph 15.3)
+
+static inline void FilterLoop24(uint8_t* p, int hstride, int vstride, int size,
+ int thresh, int ithresh, int hev_thresh) {
+ while (size-- > 0) {
+ if (needs_filter2(p, hstride, thresh, ithresh)) {
+ if (hev(p, hstride, hev_thresh)) {
+ do_filter2(p, hstride);
+ } else {
+ do_filter4(p, hstride);
+ }
+ }
+ p += vstride;
+ }
+}
+
+// on three inner edges
+static void VFilter16i(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4 * stride;
+ FilterLoop24(p, stride, 1, 16, thresh, ithresh, hev_thresh);
+ }
+}
+
+static void HFilter16i(uint8_t* p, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ int k;
+ for (k = 3; k > 0; --k) {
+ p += 4;
+ FilterLoop24(p, 1, stride, 16, thresh, ithresh, hev_thresh);
+ }
+}
+
+static void VFilter8i(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
+ FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
+}
+
+static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
+ int thresh, int ithresh, int hev_thresh) {
+ FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
+ FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
+}
+
+//-----------------------------------------------------------------------------
+
+void (*VP8EncVFilter16i)(uint8_t*, int, int, int, int) = VFilter16i;
+void (*VP8EncHFilter16i)(uint8_t*, int, int, int, int) = HFilter16i;
+void (*VP8EncVFilter8i)(uint8_t*, uint8_t*, int, int, int, int) = VFilter8i;
+void (*VP8EncHFilter8i)(uint8_t*, uint8_t*, int, int, int, int) = HFilter8i;
+
+void (*VP8EncSimpleVFilter16i)(uint8_t*, int, int) = SimpleVFilter16i;
+void (*VP8EncSimpleHFilter16i)(uint8_t*, int, int) = SimpleHFilter16i;
+
+//-----------------------------------------------------------------------------
+// Paragraph 15.4: compute the inner-edge filtering strength
+
+static int GetILevel(int sharpness, int level) {
+ if (sharpness > 0) {
+ if (sharpness > 4) {
+ level >>= 2;
+ } else {
+ level >>= 1;
+ }
+ if (level > 9 - sharpness) {
+ level = 9 - sharpness;
+ }
+ }
+ if (level < 1) level = 1;
+ return level;
+}
+
+static void DoFilter(const VP8EncIterator* const it, int level) {
+ const VP8Encoder* const enc = it->enc_;
+ const int ilevel = GetILevel(enc->config_->filter_sharpness, level);
+ const int limit = 2 * level + ilevel;
+
+ uint8_t* const y_dst = it->yuv_out2_ + Y_OFF;
+ uint8_t* const u_dst = it->yuv_out2_ + U_OFF;
+ uint8_t* const v_dst = it->yuv_out2_ + V_OFF;
+
+ // copy current block to yuv_out2_
+ memcpy(y_dst, it->yuv_out_, YUV_SIZE * sizeof(uint8_t));
+
+ if (enc->filter_hdr_.simple_ == 1) { // simple
+ VP8EncSimpleHFilter16i(y_dst, BPS, limit);
+ VP8EncSimpleVFilter16i(y_dst, BPS, limit);
+ } else { // complex
+ const int hev_thresh = (level >= 40) ? 2 : (level >= 15) ? 1 : 0;
+ VP8EncHFilter16i(y_dst, BPS, limit, ilevel, hev_thresh);
+ VP8EncHFilter8i(u_dst, v_dst, BPS, limit, ilevel, hev_thresh);
+ VP8EncVFilter16i(y_dst, BPS, limit, ilevel, hev_thresh);
+ VP8EncVFilter8i(u_dst, v_dst, BPS, limit, ilevel, hev_thresh);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// SSIM metric
+
+enum { KERNEL = 3 };
+typedef struct {
+ double w, xm, ym, xxm, xym, yym;
+} SSIMStats;
+
+static void Accumulate(const uint8_t* src1, int stride1,
+ const uint8_t* src2, int stride2,
+ int xo, int yo, int W, int H,
+ SSIMStats* const stats) {
+ const int ymin = (yo - KERNEL < 0) ? 0 : yo - KERNEL;
+ const int ymax = (yo + KERNEL > H - 1) ? H - 1 : yo + KERNEL;
+ const int xmin = (xo - KERNEL < 0) ? 0 : xo - KERNEL;
+ const int xmax = (xo + KERNEL > W - 1) ? W - 1 : xo + KERNEL;
+ int x, y;
+ src1 += ymin * stride1;
+ src2 += ymin * stride2;
+ for (y = ymin; y <= ymax; ++y, src1 += stride1, src2 += stride2) {
+ for (x = xmin; x <= xmax; ++x) {
+ const int s1 = src1[x];
+ const int s2 = src2[x];
+ stats->w += 1;
+ stats->xm += s1;
+ stats->ym += s2;
+ stats->xxm += s1 * s1;
+ stats->xym += s1 * s2;
+ stats->yym += s2 * s2;
+ }
+ }
+}
+
+static double GetSSIM(const SSIMStats* const stats) {
+ const double xmxm = stats->xm * stats->xm;
+ const double ymym = stats->ym * stats->ym;
+ const double xmym = stats->xm * stats->ym;
+ const double w2 = stats->w * stats->w;
+ double sxx = stats->xxm * stats->w - xmxm;
+ double syy = stats->yym * stats->w - ymym;
+ double sxy = stats->xym * stats->w - xmym;
+ double C1, C2;
+ double fnum;
+ double fden;
+ // small errors are possible, due to rounding. Clamp to zero.
+ if (sxx < 0.) sxx = 0.;
+ if (syy < 0.) syy = 0.;
+ C1 = 6.5025 * w2;
+ C2 = 58.5225 * w2;
+ fnum = (2 * xmym + C1) * (2 * sxy + C2);
+ fden = (xmxm + ymym + C1) * (sxx + syy + C2);
+ return (fden != 0) ? fnum / fden : 0.;
+}
+
+static double GetMBSSIM(const uint8_t* yuv1, const uint8_t* yuv2) {
+ int x, y;
+ SSIMStats s = { .0, .0, .0, .0, .0, .0 };
+
+ // compute SSIM in a 10 x 10 window
+ for (x = 3; x < 13; x++) {
+ for (y = 3; y < 13; y++) {
+ Accumulate(yuv1 + Y_OFF, BPS, yuv2 + Y_OFF, BPS, x, y, 16, 16, &s);
+ }
+ }
+ for (x = 1; x < 7; x++) {
+ for (y = 1; y < 7; y++) {
+ Accumulate(yuv1 + U_OFF, BPS, yuv2 + U_OFF, BPS, x, y, 8, 8, &s);
+ Accumulate(yuv1 + V_OFF, BPS, yuv2 + V_OFF, BPS, x, y, 8, 8, &s);
+ }
+ }
+ return GetSSIM(&s);
+}
+
+//-----------------------------------------------------------------------------
+// Exposed APIs: the encoder should call the following three functions to
+// adjust the loop-filter strength.
+
+void VP8InitFilter(VP8EncIterator* const it) {
+ int s, i;
+ if (!it->lf_stats_) return;
+
+ InitTables();
+ for (s = 0; s < NUM_MB_SEGMENTS; s++) {
+ for (i = 0; i < MAX_LF_LEVELS; i++) {
+ (*it->lf_stats_)[s][i] = 0;
+ }
+ }
+}
+
+void VP8StoreFilterStats(VP8EncIterator* const it) {
+ int d;
+ const int s = it->mb_->segment_;
+ const int level0 = it->enc_->dqm_[s].fstrength_; // TODO: ref_lf_delta[]
+
+ // explore +/-quant range of values around level0
+ const int delta_min = -it->enc_->dqm_[s].quant_;
+ const int delta_max = it->enc_->dqm_[s].quant_;
+ const int step_size = (delta_max - delta_min >= 4) ? 4 : 1;
+
+ if (!it->lf_stats_) return;
+
+  // NOTE: currently we apply the filter only across the sub-block edges.
+  // There are two reasons for that:
+  // 1. Applying the filter on macroblock edges would change pixels in the
+  //    left and top macroblocks, which would be hard to restore.
+  // 2. The macroblocks below and to the right are not yet compressed, so we
+  //    cannot apply the filter on the right and bottom macroblock edges.
+ if (it->mb_->type_ == 1 && it->mb_->skip_) return;
+
+ // Always try filter level zero
+ (*it->lf_stats_)[s][0] += GetMBSSIM(it->yuv_in_, it->yuv_out_);
+
+ for (d = delta_min; d <= delta_max; d += step_size) {
+ const int level = level0 + d;
+ if (level <= 0 || level >= MAX_LF_LEVELS) {
+ continue;
+ }
+ DoFilter(it, level);
+ (*it->lf_stats_)[s][level] += GetMBSSIM(it->yuv_in_, it->yuv_out2_);
+ }
+}
+
+void VP8AdjustFilterStrength(VP8EncIterator* const it) {
+ int s;
+ VP8Encoder* const enc = it->enc_;
+
+ if (!it->lf_stats_) {
+ return;
+ }
+ for (s = 0; s < NUM_MB_SEGMENTS; s++) {
+ int i, best_level = 0;
+ // Improvement over filter level 0 should be at least 1e-5 (relatively)
+ double best_v = 1.00001 * (*it->lf_stats_)[s][0];
+ for (i = 1; i < MAX_LF_LEVELS; i++) {
+ const double v = (*it->lf_stats_)[s][i];
+ if (v > best_v) {
+ best_v = v;
+ best_level = i;
+ }
+ }
+ enc->dqm_[s].fstrength_ = best_level;
+ }
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
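
An illustrative sketch, not part of the patch: the "Exposed APIs" comment above implies the following calling sequence during a multi-pass encode, assuming it->lf_stats_ has been allocated. The driver function is hypothetical and the macroblock loop is elided, since it lives elsewhere in the encoder.

    /* Sketch only: how the three exposed functions are meant to be sequenced. */
    static void FilterStrengthSearchSketch(VP8EncIterator* const it) {
      VP8InitFilter(it);             /* clear the per-segment SSIM accumulators */
      /* ... for each macroblock: encode and reconstruct it, then: */
      VP8StoreFilterStats(it);       /* accumulate SSIM for candidate levels */
      /* ... once the last macroblock is done: */
      VP8AdjustFilterStrength(it);   /* keep the best fstrength_ per segment */
    }
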
diff --git a/src/enc/frame.c b/src/enc/frame.c
new file mode 100644
index 00000000..9864c1d9
--- /dev/null
+++ b/src/enc/frame.c
@@ -0,0 +1,695 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// frame coding and analysis
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <math.h>
+
+#include "vp8enci.h"
+#include "cost.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#define SEGMENT_VISU 0
+#define DEBUG_SEARCH 0 // useful to track search convergence
+
+// On-the-fly info about the current set of residuals. Handy to avoid
+// passing zillions of params.
+typedef struct {
+ int first;
+ int last;
+ const int16_t* coeffs;
+
+ int coeff_type;
+ ProbaArray* prob;
+ StatsArray* stats;
+ CostArray* cost;
+} VP8Residual;
+
+//-----------------------------------------------------------------------------
+// Tables for level coding
+
+const uint8_t VP8EncBands[16 + 1] = {
+ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7,
+ 0 // sentinel
+};
+
+static const uint8_t kCat3[] = { 173, 148, 140 };
+static const uint8_t kCat4[] = { 176, 155, 140, 135 };
+static const uint8_t kCat5[] = { 180, 157, 141, 134, 130 };
+static const uint8_t kCat6[] =
+ { 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129 };
+
+//-----------------------------------------------------------------------------
+// Reset the statistics about: number of skips, token proba, level cost,...
+
+static void ResetStats(VP8Encoder* const enc, int precalc_cost) {
+ VP8Proba* const proba = &enc->proba_;
+ if (precalc_cost) VP8CalculateLevelCosts(proba);
+ proba->nb_skip_ = 0;
+ proba->nb_i4_ = 0;
+ proba->nb_i16_ = 0;
+}
+
+//-----------------------------------------------------------------------------
+// Skip decision probability
+
+static int CalcSkipProba(uint64_t nb, uint64_t total) {
+ return (int)(total ? (total - nb) * 255 / total : 255);
+}
+
+// Returns the bit-cost for coding the skip probability.
+static int FinalizeSkipProba(VP8Encoder* const enc) {
+ VP8Proba* const proba = &enc->proba_;
+ const int nb_mbs = enc->mb_w_ * enc->mb_h_;
+ const int nb_events = proba->nb_skip_;
+ int size;
+ proba->skip_proba_ = CalcSkipProba(nb_events, nb_mbs);
+ proba->use_skip_proba_ = (proba->skip_proba_ < 250);
+ size = 256; // 'use_skip_proba' bit
+ if (proba->use_skip_proba_) {
+ size += nb_events * VP8BitCost(1, proba->skip_proba_)
+ + (nb_mbs - nb_events) * VP8BitCost(0, proba->skip_proba_);
+ size += 8 * 256; // cost of signaling the skip_proba_ itself.
+ }
+ return size;
+}
+
+//-----------------------------------------------------------------------------
+// Recording of token probabilities.
+
+static void ResetTokenStats(VP8Encoder* const enc) {
+ VP8Proba* const proba = &enc->proba_;
+ memset(proba->stats_, 0, sizeof(proba->stats_));
+}
+
+// Record proba context used
+static int Record(int bit, uint64_t* const stats) {
+ stats[0] += bit;
+ stats[1] += 1;
+ return bit;
+}
+
+// Simulate block coding, but only record statistics.
+// Note: no need to record the fixed probas.
+static int RecordCoeffs(int ctx, VP8Residual* res) {
+ int n = res->first;
+ uint64_t (*s)[2] = res->stats[VP8EncBands[n]][ctx];
+ if (!Record(res->last >= 0, s[0])) {
+ return 0;
+ }
+
+ while (1) {
+ const int v = abs(res->coeffs[n++]);
+ if (!Record(v != 0, s[1])) {
+ s = res->stats[VP8EncBands[n]][0];
+ continue;
+ }
+ if (!Record(v > 1, s[2])) {
+ s = res->stats[VP8EncBands[n]][1];
+ } else {
+ if (!Record(v > 4, s[3])) {
+ if (Record(v != 2, s[4]))
+ Record(v == 4, s[5]);
+ } else if (!Record(v > 10, s[6])) {
+ Record(v > 6, s[7]);
+ } else if (!Record((v >= 3 + (8 << 2)), s[8])) {
+ Record((v >= 3 + (8 << 1)), s[9]);
+ } else {
+ Record((v >= 3 + (8 << 3)), s[10]);
+ }
+ s = res->stats[VP8EncBands[n]][2];
+ }
+ if (n == 16 || !Record(n <= res->last, s[0])) {
+ return 1;
+ }
+ }
+}
+
+// Collect statistics and deduce probabilities for next coding pass.
+// Return the total bit-cost for coding the probability updates.
+static int CalcTokenProba(uint64_t nb, uint64_t total) {
+ return (int)(nb ? ((total - nb) * 255 + total / 2) / total : 255);
+}
+
+static int FinalizeTokenProbas(VP8Encoder* const enc) {
+ VP8Proba* const proba = &enc->proba_;
+ int size = 0;
+ int t, b, c, p;
+ for (t = 0; t < NUM_TYPES; ++t) {
+ for (b = 0; b < NUM_BANDS; ++b) {
+ for (c = 0; c < NUM_CTX; ++c) {
+ for (p = 0; p < NUM_PROBAS; ++p) {
+ const uint64_t* const cnt = proba->stats_[t][b][c][p];
+ const int update_proba = VP8CoeffsUpdateProba[t][b][c][p];
+ const int old_p = VP8CoeffsProba0[t][b][c][p];
+ const int new_p = CalcTokenProba(cnt[0], cnt[1]);
+ const uint64_t old_cost = VP8BranchCost(cnt[0], cnt[1], old_p)
+ + VP8BitCost(0, update_proba);
+ const uint64_t new_cost = VP8BranchCost(cnt[0], cnt[1], new_p)
+ + VP8BitCost(1, update_proba) + 8 * 256;
+ const int use_new_p = (old_cost > new_cost);
+ size += VP8BitCost(use_new_p, update_proba);
+ if (use_new_p) { // only use proba that seem meaningful enough.
+ proba->coeffs_[t][b][c][p] = new_p;
+ size += 8 * 256;
+ } else {
+ proba->coeffs_[t][b][c][p] = old_p;
+ }
+ }
+ }
+ }
+ }
+ return size;
+}
+
+//-----------------------------------------------------------------------------
+// helper functions for residuals struct VP8Residual.
+
+static void InitResidual(int first, int coeff_type,
+ VP8Encoder* const enc, VP8Residual* const res) {
+ res->coeff_type = coeff_type;
+ res->prob = enc->proba_.coeffs_[coeff_type];
+ res->stats = enc->proba_.stats_[coeff_type];
+ res->cost = enc->proba_.level_cost_[coeff_type];
+ res->first = first;
+}
+
+static void SetResidualCoeffs(const int16_t* const coeffs,
+ VP8Residual* const res) {
+ int n;
+ res->last = -1;
+ for (n = 15; n >= res->first; --n) {
+ if (coeffs[n]) {
+ res->last = n;
+ break;
+ }
+ }
+ res->coeffs = coeffs;
+}
+
+//-----------------------------------------------------------------------------
+// Mode costs
+
+static int GetResidualCost(int ctx, const VP8Residual* const res) {
+ int n = res->first;
+ const uint8_t* p = res->prob[VP8EncBands[n]][ctx];
+ const uint16_t *t = res->cost[VP8EncBands[n]][ctx];
+ int cost;
+
+ cost = VP8BitCost(res->last >= 0, p[0]);
+ if (res->last < 0) {
+ return cost;
+ }
+ while (n <= res->last) {
+ const int v = abs(res->coeffs[n++]);
+ cost += VP8LevelCost(t, v);
+ if (v == 0) {
+ p = res->prob[VP8EncBands[n]][0];
+ t = res->cost[VP8EncBands[n]][0];
+ continue;
+ } else if (v == 1) {
+ p = res->prob[VP8EncBands[n]][1];
+ t = res->cost[VP8EncBands[n]][1];
+ } else {
+ p = res->prob[VP8EncBands[n]][2];
+ t = res->cost[VP8EncBands[n]][2];
+ }
+ if (n < 16) {
+ cost += VP8BitCost(n <= res->last, p[0]);
+ }
+ }
+ return cost;
+}
+
+int VP8GetCostLuma4(VP8EncIterator* const it, const int16_t levels[16]) {
+ const int x = (it->i4_ & 3), y = (it->i4_ >> 2);
+ VP8Residual res;
+ int R = 0;
+ int ctx;
+
+ InitResidual(0, 3, it->enc_, &res);
+ ctx = it->top_nz_[x] + it->left_nz_[y];
+ SetResidualCoeffs(levels, &res);
+ R += GetResidualCost(ctx, &res);
+ return R;
+}
+
+int VP8GetCostLuma16(VP8EncIterator* const it, const VP8ModeScore* const rd) {
+ VP8Residual res;
+ int x, y;
+ int R = 0;
+
+ VP8IteratorNzToBytes(it); // re-import the non-zero context
+
+ // DC
+ InitResidual(0, 1, it->enc_, &res);
+ SetResidualCoeffs(rd->y_dc_levels, &res);
+ R += GetResidualCost(it->top_nz_[8] + it->left_nz_[8], &res);
+
+ // AC
+ InitResidual(1, 0, it->enc_, &res);
+ for (y = 0; y < 4; ++y) {
+ for (x = 0; x < 4; ++x) {
+ const int ctx = it->top_nz_[x] + it->left_nz_[y];
+ SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res);
+ R += GetResidualCost(ctx, &res);
+ it->top_nz_[x] = it->left_nz_[y] = (res.last >= 0);
+ }
+ }
+ return R;
+}
+
+int VP8GetCostUV(VP8EncIterator* const it, const VP8ModeScore* const rd) {
+ VP8Residual res;
+ int ch, x, y;
+ int R = 0;
+
+ VP8IteratorNzToBytes(it); // re-import the non-zero context
+
+ InitResidual(0, 2, it->enc_, &res);
+ for (ch = 0; ch <= 2; ch += 2) {
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x) {
+ const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
+ SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res);
+ R += GetResidualCost(ctx, &res);
+ it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = (res.last >= 0);
+ }
+ }
+ }
+ return R;
+}
+
+//-----------------------------------------------------------------------------
+// Coefficient coding
+
+static int PutCoeffs(VP8BitWriter* const bw, int ctx, const VP8Residual* res) {
+ int n = res->first;
+ const uint8_t* p = res->prob[VP8EncBands[n]][ctx];
+ if (!VP8PutBit(bw, res->last >= 0, p[0])) {
+ return 0;
+ }
+
+ while (n < 16) {
+ const int c = res->coeffs[n++];
+ const int sign = c < 0;
+ int v = sign ? -c : c;
+ if (!VP8PutBit(bw, v != 0, p[1])) {
+ p = res->prob[VP8EncBands[n]][0];
+ continue;
+ }
+ if (!VP8PutBit(bw, v > 1, p[2])) {
+ p = res->prob[VP8EncBands[n]][1];
+ } else {
+ if (!VP8PutBit(bw, v > 4, p[3])) {
+ if (VP8PutBit(bw, v != 2, p[4]))
+ VP8PutBit(bw, v == 4, p[5]);
+ } else if (!VP8PutBit(bw, v > 10, p[6])) {
+ if (!VP8PutBit(bw, v > 6, p[7])) {
+ VP8PutBit(bw, v == 6, 159);
+ } else {
+ VP8PutBit(bw, v >= 9, 165);
+ VP8PutBit(bw, !(v & 1), 145);
+ }
+ } else {
+ int mask;
+ const uint8_t* tab;
+ if (v < 3 + (8 << 1)) { // kCat3 (3b)
+ VP8PutBit(bw, 0, p[8]);
+ VP8PutBit(bw, 0, p[9]);
+ v -= 3 + (8 << 0);
+ mask = 1 << 2;
+ tab = kCat3;
+ } else if (v < 3 + (8 << 2)) { // kCat4 (4b)
+ VP8PutBit(bw, 0, p[8]);
+ VP8PutBit(bw, 1, p[9]);
+ v -= 3 + (8 << 1);
+ mask = 1 << 3;
+ tab = kCat4;
+ } else if (v < 3 + (8 << 3)) { // kCat5 (5b)
+ VP8PutBit(bw, 1, p[8]);
+ VP8PutBit(bw, 0, p[10]);
+ v -= 3 + (8 << 2);
+ mask = 1 << 4;
+ tab = kCat5;
+ } else { // kCat6 (11b)
+ VP8PutBit(bw, 1, p[8]);
+ VP8PutBit(bw, 1, p[10]);
+ v -= 3 + (8 << 3);
+ mask = 1 << 10;
+ tab = kCat6;
+ }
+ while (mask) {
+ VP8PutBit(bw, !!(v & mask), *tab++);
+ mask >>= 1;
+ }
+ }
+ p = res->prob[VP8EncBands[n]][2];
+ }
+ VP8PutBitUniform(bw, sign);
+ if (n == 16 || !VP8PutBit(bw, n <= res->last, p[0])) {
+ return 1; // EOB
+ }
+ }
+ return 1;
+}
+
+static void CodeResiduals(VP8BitWriter* const bw,
+ VP8EncIterator* const it,
+ const VP8ModeScore* const rd) {
+ int x, y, ch;
+ VP8Residual res;
+ uint64_t pos1, pos2, pos3;
+ const int i16 = (it->mb_->type_ == 1);
+ const int segment = it->mb_->segment_;
+
+ VP8IteratorNzToBytes(it);
+
+ pos1 = VP8BitWriterPos(bw);
+ if (i16) {
+ InitResidual(0, 1, it->enc_, &res);
+ SetResidualCoeffs(rd->y_dc_levels, &res);
+ it->top_nz_[8] = it->left_nz_[8] =
+ PutCoeffs(bw, it->top_nz_[8] + it->left_nz_[8], &res);
+ InitResidual(1, 0, it->enc_, &res);
+ } else {
+ InitResidual(0, 3, it->enc_, &res);
+ }
+
+ // luma-AC
+ for (y = 0; y < 4; ++y) {
+ for (x = 0; x < 4; ++x) {
+ const int ctx = it->top_nz_[x] + it->left_nz_[y];
+ SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res);
+ it->top_nz_[x] = it->left_nz_[y] = PutCoeffs(bw, ctx, &res);
+ }
+ }
+ pos2 = VP8BitWriterPos(bw);
+
+ // U/V
+ InitResidual(0, 2, it->enc_, &res);
+ for (ch = 0; ch <= 2; ch += 2) {
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x) {
+ const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
+ SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res);
+ it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] =
+ PutCoeffs(bw, ctx, &res);
+ }
+ }
+ }
+ pos3 = VP8BitWriterPos(bw);
+ it->luma_bits_ = pos2 - pos1;
+ it->uv_bits_ = pos3 - pos2;
+ it->bit_count_[segment][i16] += it->luma_bits_;
+ it->bit_count_[segment][2] += it->uv_bits_;
+ VP8IteratorBytesToNz(it);
+}
+
+// Same as CodeResiduals, but doesn't actually write anything.
+// Instead, it just records the event distribution.
+static void RecordResiduals(VP8EncIterator* const it,
+ const VP8ModeScore* const rd) {
+ int x, y, ch;
+ VP8Residual res;
+
+ VP8IteratorNzToBytes(it);
+
+ if (it->mb_->type_ == 1) { // i16x16
+ InitResidual(0, 1, it->enc_, &res);
+ SetResidualCoeffs(rd->y_dc_levels, &res);
+ it->top_nz_[8] = it->left_nz_[8] =
+ RecordCoeffs(it->top_nz_[8] + it->left_nz_[8], &res);
+ InitResidual(1, 0, it->enc_, &res);
+ } else {
+ InitResidual(0, 3, it->enc_, &res);
+ }
+
+ // luma-AC
+ for (y = 0; y < 4; ++y) {
+ for (x = 0; x < 4; ++x) {
+ const int ctx = it->top_nz_[x] + it->left_nz_[y];
+ SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res);
+ it->top_nz_[x] = it->left_nz_[y] = RecordCoeffs(ctx, &res);
+ }
+ }
+
+ // U/V
+ InitResidual(0, 2, it->enc_, &res);
+ for (ch = 0; ch <= 2; ch += 2) {
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x) {
+ const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
+ SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res);
+ it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] =
+ RecordCoeffs(ctx, &res);
+ }
+ }
+ }
+
+ VP8IteratorBytesToNz(it);
+}
+
+//-----------------------------------------------------------------------------
+// ExtraInfo map / Debug function
+
+#if SEGMENT_VISU
+static void SetBlock(uint8_t* p, int value, int size) {
+ int y;
+ for (y = 0; y < size; ++y) {
+ memset(p, value, size);
+ p += BPS;
+ }
+}
+#endif
+
+static void ResetSSE(VP8Encoder* const enc) {
+ memset(enc->sse_, 0, sizeof(enc->sse_));
+ enc->sse_count_ = 0;
+}
+
+static void StoreSSE(const VP8EncIterator* const it) {
+ VP8Encoder* const enc = it->enc_;
+ const uint8_t* const in = it->yuv_in_;
+ const uint8_t* const out = it->yuv_out_;
+ // Note: not totally accurate at boundary. And doesn't include in-loop filter.
+ enc->sse_[0] += VP8SSE16x16(in + Y_OFF, out + Y_OFF);
+ enc->sse_[1] += VP8SSE8x8(in + U_OFF, out + U_OFF);
+ enc->sse_[2] += VP8SSE8x8(in + V_OFF, out + V_OFF);
+ enc->sse_count_ += 16 * 16;
+}
+
+static void StoreSideInfo(const VP8EncIterator* const it) {
+ VP8Encoder* const enc = it->enc_;
+ const VP8MBInfo* const mb = it->mb_;
+ WebPPicture* const pic = enc->pic_;
+
+ if (pic->stats) {
+ StoreSSE(it);
+ enc->block_count_[0] += (mb->type_ == 0);
+ enc->block_count_[1] += (mb->type_ == 1);
+ enc->block_count_[2] += (mb->skip_ != 0);
+ }
+
+ if (pic->extra_info) {
+ uint8_t* const info = &pic->extra_info[it->x_ + it->y_ * enc->mb_w_];
+ switch(pic->extra_info_type) {
+ case 1: *info = mb->type_; break;
+ case 2: *info = mb->segment_; break;
+ case 3: *info = enc->dqm_[mb->segment_].quant_; break;
+ case 4: *info = (mb->type_ == 1) ? it->preds_[0] : 0xff; break;
+ case 5: *info = mb->uv_mode_; break;
+ case 6: {
+ const int b = (int)((it->luma_bits_ + it->uv_bits_ + 7) >> 3);
+ *info = (b > 255) ? 255 : b; break;
+ }
+ default: *info = 0; break;
+ };
+ }
+#if SEGMENT_VISU // visualize segments and prediction modes
+ SetBlock(it->yuv_out_ + Y_OFF, mb->segment_ * 64, 16);
+ SetBlock(it->yuv_out_ + U_OFF, it->preds_[0] * 64, 8);
+ SetBlock(it->yuv_out_ + V_OFF, mb->uv_mode_ * 64, 8);
+#endif
+}
+
+//-----------------------------------------------------------------------------
+// Main loops
+//
+// VP8EncLoop(): does the final bitstream coding.
+
+static void ResetAfterSkip(VP8EncIterator* const it) {
+ if (it->mb_->type_ == 1) {
+ *it->nz_ = 0; // reset all predictors
+ it->left_nz_[8] = 0;
+ } else {
+ *it->nz_ &= (1 << 24); // preserve the dc_nz bit
+ }
+}
+
+int VP8EncLoop(VP8Encoder* const enc) {
+ int i, s, p;
+ VP8EncIterator it;
+ VP8ModeScore info;
+ const int dont_use_skip = !enc->proba_.use_skip_proba_;
+ const int rd_opt = enc->rd_opt_level_;
+ const int kAverageBytesPerMB = 5; // TODO: have a kTable[quality/10]
+ const int bytes_per_parts =
+ enc->mb_w_ * enc->mb_h_ * kAverageBytesPerMB / enc->num_parts_;
+
+ // Initialize the bit-writers
+ for (p = 0; p < enc->num_parts_; ++p) {
+ VP8BitWriterInit(enc->parts_ + p, bytes_per_parts);
+ }
+
+ ResetStats(enc, rd_opt != 0);
+ ResetSSE(enc);
+
+ VP8IteratorInit(enc, &it);
+ VP8InitFilter(&it);
+ do {
+ VP8IteratorImport(&it);
+ // Warning! order is important: first call VP8Decimate() and
+ // *then* decide how to code the skip decision if there's one.
+ if (!VP8Decimate(&it, &info, rd_opt) || dont_use_skip) {
+ CodeResiduals(it.bw_, &it, &info);
+ } else { // reset predictors after a skip
+ ResetAfterSkip(&it);
+ }
+ StoreSideInfo(&it);
+ VP8StoreFilterStats(&it);
+ VP8IteratorExport(&it);
+ } while (VP8IteratorNext(&it, it.yuv_out_));
+ VP8AdjustFilterStrength(&it);
+
+ // Finalize the partitions
+ for (p = 0; p < enc->num_parts_; ++p) {
+ VP8BitWriterFinish(enc->parts_ + p);
+ }
+ // and byte counters
+ if (enc->pic_->stats) {
+ for (i = 0; i <= 2; ++i) {
+ for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
+ enc->residual_bytes_[i][s] = (int)((it.bit_count_[s][i] + 7) >> 3);
+ }
+ }
+ }
+ return 1;
+}
+
+//-----------------------------------------------------------------------------
+// VP8StatLoop(): only collects statistics (number of skips, token usage, ...).
+// This is used for deciding optimal probabilities. It also
+// modifies the quantizer value if some target (size, PSNR)
+// was specified.
+
+#define kHeaderSizeEstimate (15 + 20 + 10) // TODO: fix better
+
+static int OneStatPass(VP8Encoder* const enc, float q, int rd_opt, int nb_mbs,
+ float* const PSNR) {
+ VP8EncIterator it;
+ uint64_t size = 0;
+ uint64_t distortion = 0;
+ const uint64_t pixel_count = nb_mbs * 384;
+
+ // Make sure the quality parameter is inside valid bounds
+ if (q < 0.) {
+ q = 0;
+ } else if (q > 100.) {
+ q = 100;
+ }
+
+ VP8SetSegmentParams(enc, q); // setup segment quantizations and filters
+
+ ResetStats(enc, rd_opt != 0);
+ ResetTokenStats(enc);
+
+ VP8IteratorInit(enc, &it);
+ do {
+ VP8ModeScore info;
+ VP8IteratorImport(&it);
+ if (VP8Decimate(&it, &info, rd_opt)) {
+ // Just record the number of skips and act like skip_proba is not used.
+ enc->proba_.nb_skip_++;
+ }
+ RecordResiduals(&it, &info);
+ size += info.R;
+ distortion += info.D;
+ } while (VP8IteratorNext(&it, it.yuv_out_) && --nb_mbs > 0);
+ size += FinalizeSkipProba(enc);
+ size += FinalizeTokenProbas(enc);
+ size += enc->segment_hdr_.size_;
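+ // Note: the costs accumulated above are in roughly 1/256-bit units, so the
+ // '>> 11' below converts the total to bytes (with rounding).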
+ size = ((size + 1024) >> 11) + kHeaderSizeEstimate;
+
+ if (PSNR) {
+ *PSNR = (float)(10.* log10(255. * 255. * pixel_count / distortion));
+ }
+ return (int)size;
+}
+
+// successive refinement increments.
+static const int dqs[] = { 20, 15, 10, 8, 6, 4, 2, 1, 0 };
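+// Illustration (hypothetical numbers): starting from q = 50 with a byte
+// target, pass 0 probes q = 50 and, if it undershoots the target, raises q
+// by 20; pass 1 probes q = 70 and, if it overshoots, lowers q by 15; and so
+// on with steps 10, 8, ..., 1, so the last probes differ by a single
+// quantizer step.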
+
+int VP8StatLoop(VP8Encoder* const enc) {
+ const int do_search =
+ (enc->config_->target_size > 0 || enc->config_->target_PSNR > 0);
+ const int fast_probe = (enc->method_ < 2 && !do_search);
+ float q = enc->config_->quality;
+ int pass;
+ int nb_mbs;
+
+ // Fast mode: quick analysis pass over a few mbs. Better than nothing.
+ nb_mbs = enc->mb_w_ * enc->mb_h_;
+ if (fast_probe && nb_mbs > 100) nb_mbs = 100;
+
+ // No target size: just do several passes without changing 'q'
+ if (!do_search) {
+ for (pass = 0; pass < enc->config_->pass; ++pass) {
+ const int rd_opt = (enc->method_ > 2);
+ OneStatPass(enc, q, rd_opt, nb_mbs, NULL);
+ }
+ return 1;
+ }
+
+ // binary search for a size close to target
+ for (pass = 0; pass < enc->config_->pass || (dqs[pass] > 0); ++pass) {
+ const int rd_opt = 1;
+ float PSNR;
+ int criterion;
+ const int size = OneStatPass(enc, q, rd_opt, nb_mbs, &PSNR);
+#if DEBUG_SEARCH
+ printf("#%d size=%d PSNR=%.2f q=%.2f\n", pass, size, PSNR, q);
+#endif
+
+ if (enc->config_->target_PSNR > 0) {
+ criterion = (PSNR < enc->config_->target_PSNR);
+ } else {
+ criterion = (size < enc->config_->target_size);
+ }
+ // dichotomize
+ if (criterion) {
+ q += dqs[pass];
+ } else {
+ q -= dqs[pass];
+ }
+ }
+ return 1;
+}
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/enc/iterator.c b/src/enc/iterator.c
new file mode 100644
index 00000000..991644d0
--- /dev/null
+++ b/src/enc/iterator.c
@@ -0,0 +1,406 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// VP8Iterator: block iterator
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include <string.h>
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// VP8Iterator
+//-----------------------------------------------------------------------------
+
+static void InitLeft(VP8EncIterator* const it) {
+ const VP8Encoder* const enc = it->enc_;
+ enc->y_left_[-1] = enc->u_left_[-1] = enc->v_left_[-1] =
+ (it->y_) > 0 ? 129 : 127;
+ memset(enc->y_left_, 129, 16);
+ memset(enc->u_left_, 129, 8);
+ memset(enc->v_left_, 129, 8);
+ it->left_nz_[8] = 0;
+}
+
+static void InitTop(VP8EncIterator* const it) {
+ const VP8Encoder* const enc = it->enc_;
+ const int top_size = enc->mb_w_ * 16;
+ memset(enc->y_top_, 127, 2 * top_size);
+ memset(enc->nz_, 0, enc->mb_w_ * sizeof(*enc->nz_));
+}
+
+void VP8IteratorReset(VP8EncIterator* const it) {
+ VP8Encoder* const enc = it->enc_;
+ it->x_ = 0;
+ it->y_ = 0;
+ it->y_offset_ = 0;
+ it->uv_offset_ = 0;
+ it->mb_ = enc->mb_info_;
+ it->preds_ = enc->preds_;
+ it->nz_ = enc->nz_;
+ it->bw_ = &enc->parts_[0];
+ it->done_ = enc->mb_w_ * enc->mb_h_;
+ InitTop(it);
+ InitLeft(it);
+ memset(it->bit_count_, 0, sizeof(it->bit_count_));
+ it->do_trellis_ = 0;
+}
+
+void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it) {
+ it->enc_ = enc;
+ it->y_stride_ = enc->pic_->y_stride;
+ it->uv_stride_ = enc->pic_->uv_stride;
+ // TODO(later): for multithreading, these should be owned by 'it'.
+ it->yuv_in_ = enc->yuv_in_;
+ it->yuv_out_ = enc->yuv_out_;
+ it->yuv_out2_ = enc->yuv_out2_;
+ it->yuv_p_ = enc->yuv_p_;
+ it->lf_stats_ = enc->lf_stats_;
+ VP8IteratorReset(it);
+}
+
+//-----------------------------------------------------------------------------
+// Import the source samples into the cache. Takes care of replicating
+// boundary pixels if necessary.
+
+void VP8IteratorImport(const VP8EncIterator* const it) {
+ const VP8Encoder* const enc = it->enc_;
+ const int x = it->x_, y = it->y_;
+ const WebPPicture* const pic = enc->pic_;
+ const uint8_t* ysrc = pic->y + (y * pic->y_stride + x) * 16;
+ const uint8_t* usrc = pic->u + (y * pic->uv_stride + x) * 8;
+ const uint8_t* vsrc = pic->v + (y * pic->uv_stride + x) * 8;
+ uint8_t* ydst = it->yuv_in_ + Y_OFF;
+ uint8_t* udst = it->yuv_in_ + U_OFF;
+ uint8_t* vdst = it->yuv_in_ + V_OFF;
+ int w = (pic->width - x * 16);
+ int h = (pic->height - y * 16);
+ int i;
+
+ if (w > 16) w = 16;
+ if (h > 16) h = 16;
+ // Luma plane
+ for (i = 0; i < h; ++i) {
+ memcpy(ydst, ysrc, w);
+ if (w < 16) memset(ydst + w, ydst[w - 1], 16 - w);
+ ydst += BPS;
+ ysrc += pic->y_stride;
+ }
+ for (i = h; i < 16; ++i) {
+ memcpy(ydst, ydst - BPS, 16);
+ ydst += BPS;
+ }
+ // U/V plane
+ w = (w + 1) / 2;
+ h = (h + 1) / 2;
+ for (i = 0; i < h; ++i) {
+ memcpy(udst, usrc, w);
+ memcpy(vdst, vsrc, w);
+ if (w < 8) {
+ memset(udst + w, udst[w - 1], 8 - w);
+ memset(vdst + w, vdst[w - 1], 8 - w);
+ }
+ udst += BPS;
+ vdst += BPS;
+ usrc += pic->uv_stride;
+ vsrc += pic->uv_stride;
+ }
+ for (i = h; i < 8; ++i) {
+ memcpy(udst, udst - BPS, 8);
+ memcpy(vdst, vdst - BPS, 8);
+ udst += BPS;
+ vdst += BPS;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Copy back the compressed samples into user space if requested.
+
+void VP8IteratorExport(const VP8EncIterator* const it) {
+ const VP8Encoder* const enc = it->enc_;
+ if (enc->config_->show_compressed) {
+ const int x = it->x_, y = it->y_;
+ const uint8_t* const ysrc = it->yuv_out_ + Y_OFF;
+ const uint8_t* const usrc = it->yuv_out_ + U_OFF;
+ const uint8_t* const vsrc = it->yuv_out_ + V_OFF;
+ const WebPPicture* const pic = enc->pic_;
+ uint8_t* ydst = pic->y + (y * pic->y_stride + x) * 16;
+ uint8_t* udst = pic->u + (y * pic->uv_stride + x) * 8;
+ uint8_t* vdst = pic->v + (y * pic->uv_stride + x) * 8;
+ int w = (pic->width - x * 16);
+ int h = (pic->height - y * 16);
+ int i;
+
+ if (w > 16) w = 16;
+ if (h > 16) h = 16;
+
+ // Luma plane
+ for (i = 0; i < h; ++i) {
+ memcpy(ydst + i * pic->y_stride, ysrc + i * BPS, w);
+ }
+ // U/V plane
+ w = (w + 1) / 2;
+ h = (h + 1) / 2;
+ for (i = 0; i < h; ++i) {
+ memcpy(udst + i * pic->uv_stride, usrc + i * BPS, w);
+ memcpy(vdst + i * pic->uv_stride, vsrc + i * BPS, w);
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Non-zero contexts setup/teardown
+
+// Nz bits:
+// 0 1 2 3 Y
+// 4 5 6 7
+// 8 9 10 11
+// 12 13 14 15
+// 16 17 U
+// 18 19
+// 20 21 V
+// 22 23
+// 24 DC-intra16
+
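+// Example: bit 15 is the bottom-right luma 4x4 block. When contexts are
+// rebuilt in VP8IteratorNzToBytes(), that bit of the macroblock above is read
+// back as top_nz_[3] (bottom row), while the same bit of the macroblock to
+// the left is read back as left_nz_[3] (rightmost column).
+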
+// Convert packed context to byte array
+#define BIT(nz, n) (!!((nz) & (1 << (n))))
+
+void VP8IteratorNzToBytes(VP8EncIterator* const it) {
+ const int tnz = it->nz_[0], lnz = it->nz_[-1];
+
+ // Top-Y
+ it->top_nz_[0] = BIT(tnz, 12);
+ it->top_nz_[1] = BIT(tnz, 13);
+ it->top_nz_[2] = BIT(tnz, 14);
+ it->top_nz_[3] = BIT(tnz, 15);
+ // Top-U
+ it->top_nz_[4] = BIT(tnz, 18);
+ it->top_nz_[5] = BIT(tnz, 19);
+ // Top-V
+ it->top_nz_[6] = BIT(tnz, 22);
+ it->top_nz_[7] = BIT(tnz, 23);
+ // DC
+ it->top_nz_[8] = BIT(tnz, 24);
+
+ // left-Y
+ it->left_nz_[0] = BIT(lnz, 3);
+ it->left_nz_[1] = BIT(lnz, 7);
+ it->left_nz_[2] = BIT(lnz, 11);
+ it->left_nz_[3] = BIT(lnz, 15);
+ // left-U
+ it->left_nz_[4] = BIT(lnz, 17);
+ it->left_nz_[5] = BIT(lnz, 19);
+ // left-V
+ it->left_nz_[6] = BIT(lnz, 21);
+ it->left_nz_[7] = BIT(lnz, 23);
+ // left-DC is special, iterated separately
+}
+
+void VP8IteratorBytesToNz(VP8EncIterator* const it) {
+ uint32_t nz = 0;
+ // top
+ nz |= (it->top_nz_[0] << 12) | (it->top_nz_[1] << 13);
+ nz |= (it->top_nz_[2] << 14) | (it->top_nz_[3] << 15);
+ nz |= (it->top_nz_[4] << 18) | (it->top_nz_[5] << 19);
+ nz |= (it->top_nz_[6] << 22) | (it->top_nz_[7] << 23);
+ nz |= (it->top_nz_[8] << 24); // we propagate the _top_ bit, esp. for intra4
+ // left
+ nz |= (it->left_nz_[0] << 3) | (it->left_nz_[1] << 7) | (it->left_nz_[2] << 11);
+ nz |= (it->left_nz_[4] << 17) | (it->left_nz_[6] << 21);
+
+ *it->nz_ = nz;
+}
+
+#undef BIT
+
+//-----------------------------------------------------------------------------
+// Advance to the next position, doing the bookkeeping.
+
+int VP8IteratorNext(VP8EncIterator* const it,
+ const uint8_t* const block_to_save) {
+ VP8Encoder* const enc = it->enc_;
+ if (block_to_save) {
+ const int x = it->x_, y = it->y_;
+ const uint8_t* const ysrc = block_to_save + Y_OFF;
+ const uint8_t* const usrc = block_to_save + U_OFF;
+ if (x < enc->mb_w_ - 1) { // left
+ int i;
+ for (i = 0; i < 16; ++i) {
+ enc->y_left_[i] = ysrc[15 + i * BPS];
+ }
+ for (i = 0; i < 8; ++i) {
+ enc->u_left_[i] = usrc[7 + i * BPS];
+ enc->v_left_[i] = usrc[15 + i * BPS];
+ }
+ // top-left (before 'top'!)
+ enc->y_left_[-1] = enc->y_top_[x * 16 + 15];
+ enc->u_left_[-1] = enc->uv_top_[x * 16 + 0 + 7];
+ enc->v_left_[-1] = enc->uv_top_[x * 16 + 8 + 7];
+ }
+ if (y < enc->mb_h_ - 1) { // top
+ memcpy(enc->y_top_ + x * 16, ysrc + 15 * BPS, 16);
+ memcpy(enc->uv_top_ + x * 16, usrc + 7 * BPS, 8 + 8);
+ }
+ }
+
+ it->mb_++;
+ it->preds_ += 4;
+ it->nz_++;
+ it->x_++;
+ if (it->x_ == enc->mb_w_) {
+ it->x_ = 0;
+ it->y_++;
+ it->bw_ = &enc->parts_[it->y_ & (enc->num_parts_ - 1)];
+ it->preds_ = enc->preds_ + it->y_ * 4 * enc->preds_w_;
+ it->nz_ = enc->nz_;
+ InitLeft(it);
+ }
+ return (0 < --it->done_);
+}
+
+//-----------------------------------------------------------------------------
+// Helper function to set mode properties
+
+void VP8SetIntra16Mode(const VP8EncIterator* const it, int mode) {
+ int y;
+ uint8_t* preds = it->preds_;
+ for (y = 0; y < 4; ++y) {
+ memset(preds, mode, 4);
+ preds += it->enc_->preds_w_;
+ }
+ it->mb_->type_ = 1;
+}
+
+void VP8SetIntra4Mode(const VP8EncIterator* const it, int modes[16]) {
+ int x, y;
+ uint8_t* preds = it->preds_;
+ for (y = 0; y < 4; ++y) {
+ for (x = 0; x < 4; ++x) {
+ preds[x] = modes[x + y * 4];
+ }
+ preds += it->enc_->preds_w_;
+ }
+ it->mb_->type_ = 0;
+}
+
+void VP8SetIntraUVMode(const VP8EncIterator* const it, int mode) {
+ it->mb_->uv_mode_ = mode;
+}
+
+void VP8SetSkip(const VP8EncIterator* const it, int skip) {
+ it->mb_->skip_ = skip;
+}
+
+void VP8SetSegment(const VP8EncIterator* const it, int segment) {
+ it->mb_->segment_ = segment;
+}
+
+//-----------------------------------------------------------------------------
+// Intra4x4 sub-blocks iteration
+//
+// We store and update the boundary samples in an array of 37 pixels. They
+// are updated as we iterate and reconstruct each intra4x4 block in turn.
+// The position of the samples has the following snake pattern:
+//
+// 16|17 18 19 20|21 22 23 24|25 26 27 28|29 30 31 32|33 34 35 36 <- Top-right
+// --+-----------+-----------+-----------+-----------+
+// 15| 19| 23| 27| 31|
+// 14| 18| 22| 26| 30|
+// 13| 17| 21| 25| 29|
+// 12|13 14 15 16|17 18 19 20|21 22 23 24|25 26 27 28|
+// --+-----------+-----------+-----------+-----------+
+// 11| 15| 19| 23| 27|
+// 10| 14| 18| 22| 26|
+// 9| 13| 17| 21| 25|
+// 8| 9 10 11 12|13 14 15 16|17 18 19 20|21 22 23 24|
+// --+-----------+-----------+-----------+-----------+
+// 7| 11| 15| 19| 23|
+// 6| 10| 14| 18| 22|
+// 5| 9| 13| 17| 21|
+// 4| 5 6 7 8| 9 10 11 12|13 14 15 16|17 18 19 20|
+// --+-----------+-----------+-----------+-----------+
+// 3| 7| 11| 15| 19|
+// 2| 6| 10| 14| 18|
+// 1| 5| 9| 13| 17|
+// 0| 1 2 3 4| 5 6 7 8| 9 10 11 12|13 14 15 16|
+// --+-----------+-----------+-----------+-----------+
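+//
+// Reading example: for the first sub-block (i4_ == 0), i4_top_ points at
+// i4_boundary_ + 17, so i4_top_[0..3] are the top row (samples 17..20 above),
+// i4_top_[4..7] the top-right samples (21..24), i4_top_[-1] the top-left
+// corner (16) and i4_top_[-5..-2] the left column (12..15).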
+
+// Array to record the position of the top sample to pass to the prediction
+// functions in dsp.c.
+static const uint8_t VP8TopLeftI4[16] = {
+ 17, 21, 25, 29,
+ 13, 17, 21, 25,
+ 9, 13, 17, 21,
+ 5, 9, 13, 17
+};
+
+void VP8IteratorStartI4(VP8EncIterator* const it) {
+ VP8Encoder* const enc = it->enc_;
+ int i;
+
+ it->i4_ = 0; // first 4x4 sub-block
+ it->i4_top_ = it->i4_boundary_ + VP8TopLeftI4[0];
+
+ // Import the boundary samples
+ for (i = 0; i < 17; ++i) { // left
+ it->i4_boundary_[i] = enc->y_left_[15 - i];
+ }
+ for (i = 0; i < 16; ++i) { // top
+ it->i4_boundary_[17 + i] = enc->y_top_[it->x_ * 16 + i];
+ }
+ // top-right samples have a special case on the far right of the picture
+ if (it->x_ < enc->mb_w_ - 1) {
+ for (i = 16; i < 16 + 4; ++i) {
+ it->i4_boundary_[17 + i] = enc->y_top_[it->x_ * 16 + i];
+ }
+ } else { // else, replicate the last valid pixel four times
+ for (i = 16; i < 16 + 4; ++i) {
+ it->i4_boundary_[17 + i] = it->i4_boundary_[17 + 15];
+ }
+ }
+ VP8IteratorNzToBytes(it); // import the non-zero context
+}
+
+int VP8IteratorRotateI4(VP8EncIterator* const it,
+ const uint8_t* const yuv_out) {
+ const uint8_t* const blk = yuv_out + VP8Scan[it->i4_];
+ uint8_t* const top = it->i4_top_;
+ int i;
+
+ // Update the cache with 7 fresh samples
+ for (i = 0; i <= 3; ++i) {
+ top[-4 + i] = blk[i + 3 * BPS]; // store future top samples
+ }
+ if ((it->i4_ & 3) != 3) { // if not on the right sub-blocks #3, #7, #11, #15
+ for (i = 0; i <= 2; ++i) { // store future left samples
+ top[i] = blk[3 + (2 - i) * BPS];
+ }
+ } else { // else replicate top-right samples, as the spec says.
+ for (i = 0; i <= 3; ++i) {
+ top[i] = top[i + 4];
+ }
+ }
+ // move pointers to next sub-block
+ it->i4_++;
+ if (it->i4_ == 16) { // we're done
+ return 0;
+ }
+
+ it->i4_top_ = it->i4_boundary_ + VP8TopLeftI4[it->i4_];
+ return 1;
+}
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/enc/picture.c b/src/enc/picture.c
new file mode 100644
index 00000000..6c12ea48
--- /dev/null
+++ b/src/enc/picture.c
@@ -0,0 +1,316 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// WebPPicture utils: colorspace conversion, crop, ...
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// WebPPicture
+//-----------------------------------------------------------------------------
+
+int WebPPictureAlloc(WebPPicture* const picture) {
+ if (picture) {
+ const int width = picture->width;
+ const int height = picture->height;
+ const int uv_width = (width + 1) / 2;
+ const int uv_height = (height + 1) / 2;
+ const uint64_t y_size = (uint64_t)width * height;
+ const uint64_t uv_size = (uint64_t)uv_width * uv_height;
+ const uint64_t total_size = y_size + 2 * uv_size;
+ // Security and validation checks
+ if (uv_width <= 0 || uv_height <= 0 || // check param error
+ y_size >= (1ULL << 40) || // check for reasonable global size
+ (size_t)total_size != total_size) { // check for overflow on 32bit
+ return 0;
+ }
+ picture->y_stride = width;
+ picture->uv_stride = uv_width;
+ WebPPictureFree(picture); // erase previous buffer
+ picture->y = (uint8_t*)malloc((size_t)total_size);
+ if (picture->y == NULL) return 0;
+ picture->u = picture->y + y_size;
+ picture->v = picture->u + uv_size;
+ }
+ return 1;
+}
+
+void WebPPictureFree(WebPPicture* const picture) {
+ if (picture) {
+ free(picture->y);
+ picture->y = picture->u = picture->v = NULL;
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+int WebPPictureCopy(const WebPPicture* const src, WebPPicture* const dst) {
+ int y;
+ if (src == NULL || dst == NULL) return 0;
+ if (src == dst) return 1;
+ *dst = *src;
+ dst->y = NULL;
+ if (!WebPPictureAlloc(dst)) return 0;
+ for (y = 0; y < dst->height; ++y) {
+ memcpy(dst->y + y * dst->y_stride, src->y + y * src->y_stride, src->width);
+ }
+ for (y = 0; y < (dst->height + 1) / 2; ++y) {
+ memcpy(dst->u + y * dst->uv_stride,
+ src->u + y * src->uv_stride, (src->width + 1) / 2);
+ memcpy(dst->v + y * dst->uv_stride,
+ src->v + y * src->uv_stride, (src->width + 1) / 2);
+ }
+ return 1;
+}
+
+int WebPPictureCrop(WebPPicture* const pic,
+ int left, int top, int width, int height) {
+ WebPPicture tmp;
+ int y;
+
+ if (pic == NULL) return 0;
+ if (width <= 0 || height <= 0) return 0;
+ if (left < 0 || ((left + width + 1) & ~1) > pic->width) return 0;
+ if (top < 0 || ((top + height + 1) & ~1) > pic->height) return 0;
+
+ tmp = *pic;
+ tmp.y = NULL;
+ tmp.width = width;
+ tmp.height = height;
+ if (!WebPPictureAlloc(&tmp)) return 0;
+
+ for (y = 0; y < height; ++y) {
+ memcpy(tmp.y + y * tmp.y_stride,
+ pic->y + (top + y) * pic->y_stride + left, width);
+ }
+ for (y = 0; y < (height + 1) / 2; ++y) {
+ const int offset = (y + top / 2) * pic->uv_stride + left / 2;
+ memcpy(tmp.u + y * tmp.uv_stride, pic->u + offset, (width + 1) / 2);
+ memcpy(tmp.v + y * tmp.uv_stride, pic->v + offset, (width + 1) / 2);
+ }
+ WebPPictureFree(pic);
+ *pic = tmp;
+ return 1;
+}
+
+//-----------------------------------------------------------------------------
+// Write-to-memory
+
+typedef struct {
+ uint8_t** mem;
+ size_t max_size;
+ size_t* size;
+} WebPMemoryWriter;
+
+static void InitMemoryWriter(WebPMemoryWriter* const writer) {
+ *writer->mem = NULL;
+ *writer->size = 0;
+ writer->max_size = 0;
+}
+
+static int WebPMemoryWrite(const uint8_t* data, size_t data_size,
+ const WebPPicture* const picture) {
+ WebPMemoryWriter* const w = (WebPMemoryWriter*)picture->custom_ptr;
+ size_t next_size;
+ if (w == NULL) {
+ return 1;
+ }
+ next_size = (*w->size) + data_size;
+ if (next_size > w->max_size) {
+ uint8_t* new_mem;
+ size_t next_max_size = w->max_size * 2;
+ if (next_max_size < next_size) next_max_size = next_size;
+ if (next_max_size < 8192) next_max_size = 8192;
+ new_mem = (uint8_t*)malloc(next_max_size);
+ if (new_mem == NULL) {
+ return 0;
+ }
+ if ((*w->size) > 0) {
+ memcpy(new_mem, *w->mem, *w->size);
+ }
+ free(*w->mem);
+ *w->mem = new_mem;
+ w->max_size = next_max_size;
+ }
+ if (data_size) {
+ memcpy((*w->mem) + (*w->size), data, data_size);
+ *w->size += data_size;
+ }
+ return 1;
+}
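+
+// Note: the writer grows its buffer geometrically (doubling, with an
+// 8192-byte floor), so the total amount of copying stays linear in the final
+// output size.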
+
+//-----------------------------------------------------------------------------
+// RGB -> YUV conversion
+// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
+// More information at: http://en.wikipedia.org/wiki/YCbCr
+// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
+// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
+// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
+// We use 16bit fixed point operations.
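+//
+// Quick sanity check of the fixed-point constants (with YUV_FRAC = 16 below):
+// 16839/65536 ~= 0.2569, 33059/65536 ~= 0.5044, 6420/65536 ~= 0.0979.
+// For a pure white pixel r = g = b = 255:
+//   luma = 255 * (16839 + 33059 + 6420) = 14361090
+//   Y    = (14361090 + (1 << 15) + (16 << 16)) >> 16 = 235   // BT.601 white
+// while the U/V coefficients cancel out, giving the neutral chroma value 128.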
+
+enum { YUV_FRAC = 16 };
+
+static inline int clip_uv(int v) {
+ v = (v + (257 << (YUV_FRAC + 2 - 1))) >> (YUV_FRAC + 2);
+ return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
+}
+
+static inline int rgb_to_y(int r, int g, int b) {
+ const int kRound = (1 << (YUV_FRAC - 1)) + (16 << YUV_FRAC);
+ const int luma = 16839 * r + 33059 * g + 6420 * b;
+ return (luma + kRound) >> YUV_FRAC; // no need to clip
+}
+
+static inline int rgb_to_u(int r, int g, int b) {
+ return clip_uv(-9719 * r - 19081 * g + 28800 * b);
+}
+
+static inline int rgb_to_v(int r, int g, int b) {
+ return clip_uv(+28800 * r - 24116 * g - 4684 * b);
+}
+
+// TODO: we can do better than simply 2x2 averaging on U/V samples.
+#define SUM4(ptr) ((ptr)[0] + (ptr)[step] + \
+ (ptr)[rgb_stride] + (ptr)[rgb_stride + step])
+#define SUM2H(ptr) (2 * (ptr)[0] + 2 * (ptr)[step])
+#define SUM2V(ptr) (2 * (ptr)[0] + 2 * (ptr)[rgb_stride])
+#define SUM1(ptr) (4 * (ptr)[0])
+#define RGB_TO_UV(x, y, SUM) { \
+ const int src = (2 * (step * (x) + (y) * rgb_stride)); \
+ const int dst = (x) + (y) * picture->uv_stride; \
+ const int r = SUM(r_ptr + src); \
+ const int g = SUM(g_ptr + src); \
+ const int b = SUM(b_ptr + src); \
+ picture->u[dst] = rgb_to_u(r, g, b); \
+ picture->v[dst] = rgb_to_v(r, g, b); \
+}
+
+static int Import(WebPPicture* const picture,
+ const uint8_t* const rgb, int rgb_stride,
+ int step, int swap) {
+ int x, y;
+ const uint8_t* const r_ptr = rgb + (swap ? 2 : 0);
+ const uint8_t* const g_ptr = rgb + 1;
+ const uint8_t* const b_ptr = rgb + (swap ? 0 : 2);
+
+ for (y = 0; y < picture->height; ++y) {
+ for (x = 0; x < picture->width; ++x) {
+ const int offset = step * x + y * rgb_stride;
+ picture->y[x + y * picture->y_stride] =
+ rgb_to_y(r_ptr[offset], g_ptr[offset], b_ptr[offset]);
+ }
+ }
+ for (y = 0; y < (picture->height >> 1); ++y) {
+ for (x = 0; x < (picture->width >> 1); ++x) {
+ RGB_TO_UV(x, y, SUM4);
+ }
+ if (picture->width & 1) {
+ RGB_TO_UV(x, y, SUM2V);
+ }
+ }
+ if (picture->height & 1) {
+ for (x = 0; x < (picture->width >> 1); ++x) {
+ RGB_TO_UV(x, y, SUM2H);
+ }
+ if (picture->width & 1) {
+ RGB_TO_UV(x, y, SUM1);
+ }
+ }
+ return 1;
+}
+#undef SUM4
+#undef SUM2V
+#undef SUM2H
+#undef SUM1
+#undef RGB_TO_UV
+
+int WebPPictureImportRGB(WebPPicture* const picture,
+ const uint8_t* const rgb, int rgb_stride) {
+ if (!WebPPictureAlloc(picture)) return 0;
+ return Import(picture, rgb, rgb_stride, 3, 0);
+}
+
+int WebPPictureImportBGR(WebPPicture* const picture,
+ const uint8_t* const rgb, int rgb_stride) {
+ if (!WebPPictureAlloc(picture)) return 0;
+ return Import(picture, rgb, rgb_stride, 3, 1);
+}
+
+int WebPPictureImportRGBA(WebPPicture* const picture,
+ const uint8_t* const rgba, int rgba_stride) {
+ if (!WebPPictureAlloc(picture)) return 0;
+ return Import(picture, rgba, rgba_stride, 4, 0);
+}
+
+int WebPPictureImportBGRA(WebPPicture* const picture,
+ const uint8_t* const rgba, int rgba_stride) {
+ if (!WebPPictureAlloc(picture)) return 0;
+ return Import(picture, rgba, rgba_stride, 4, 1);
+}
+
+//-----------------------------------------------------------------------------
+// Simplest call:
+
+typedef int (*Importer)(WebPPicture* const, const uint8_t* const, int);
+
+static size_t Encode(const uint8_t* rgb, int width, int height, int stride,
+ Importer import, float quality_factor, uint8_t** output) {
+ size_t output_size = 0;
+ WebPPicture pic;
+ WebPConfig config;
+ WebPMemoryWriter wrt;
+ int ok;
+
+ if (!WebPConfigPreset(&config, WEBP_PRESET_DEFAULT, quality_factor) ||
+ !WebPPictureInit(&pic)) {
+ return 0; // shouldn't happen, except if system installation is broken
+ }
+
+ pic.width = width;
+ pic.height = height;
+ pic.writer = WebPMemoryWrite;
+ pic.custom_ptr = &wrt;
+
+ wrt.mem = output;
+ wrt.size = &output_size;
+ InitMemoryWriter(&wrt);
+
+ ok = import(&pic, rgb, stride) && WebPEncode(&config, &pic);
+ WebPPictureFree(&pic);
+ if (!ok) {
+ free(*output);
+ *output = NULL;
+ return 0;
+ }
+ return output_size;
+}
+
+#define ENCODE_FUNC(NAME, IMPORTER) \
+size_t NAME(const uint8_t* in, int w, int h, int bps, float q, \
+ uint8_t** out) { \
+ return Encode(in, w, h, bps, IMPORTER, q, out); \
+}
+
+ENCODE_FUNC(WebPEncodeRGB, WebPPictureImportRGB);
+ENCODE_FUNC(WebPEncodeBGR, WebPPictureImportBGR);
+ENCODE_FUNC(WebPEncodeRGBA, WebPPictureImportRGBA);
+ENCODE_FUNC(WebPEncodeBGRA, WebPPictureImportBGRA);
+
+#undef ENCODE_FUNC
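+
+// Usage sketch (illustrative only, error handling kept minimal): encode a
+// packed RGB buffer at quality 75; the caller owns the returned buffer and
+// must free() it.
+//
+//   uint8_t* output = NULL;
+//   const size_t size = WebPEncodeRGB(rgb, width, height, width * 3,
+//                                     75.f, &output);
+//   if (size > 0) {
+//     // ... write 'size' bytes from 'output' ...
+//   }
+//   free(output);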
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/enc/quant.c b/src/enc/quant.c
new file mode 100644
index 00000000..e4399194
--- /dev/null
+++ b/src/enc/quant.c
@@ -0,0 +1,923 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Quantization
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <math.h>
+
+#include "vp8enci.h"
+#include "cost.h"
+
+#define DO_TRELLIS_I4 1
+#define DO_TRELLIS_I16 1 // not a huge gain, but ok at low bitrate.
+#define DO_TRELLIS_UV 0 // disable trellis for UV. Risky. Not worth it.
+#define USE_TDISTO 1
+
+#define MID_ALPHA 64 // neutral value for susceptibility
+#define MIN_ALPHA 30 // lowest usable value for susceptibility
+#define MAX_ALPHA 100 // highest meaningful value for susceptibility
+
+#define SNS_TO_DQ 0.9 // Scaling constant between the sns value and the QP
+ // power-law modulation. Must be strictly less than 1.
+
+#define MULT_8B(a, b) (((a) * (b) + 128) >> 8)
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+
+static inline int clip(int v, int m, int M) {
+ return v < m ? m : v > M ? M : v;
+}
+
+const uint8_t VP8Zigzag[16] = {
+ 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
+};
+
+static const uint8_t kDcTable[128] = {
+ 4, 5, 6, 7, 8, 9, 10, 10,
+ 11, 12, 13, 14, 15, 16, 17, 17,
+ 18, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89,
+ 91, 93, 95, 96, 98, 100, 101, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 122, 124, 126, 128, 130, 132, 134, 136,
+ 138, 140, 143, 145, 148, 151, 154, 157
+};
+
+static const uint16_t kAcTable[128] = {
+ 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 60,
+ 62, 64, 66, 68, 70, 72, 74, 76,
+ 78, 80, 82, 84, 86, 88, 90, 92,
+ 94, 96, 98, 100, 102, 104, 106, 108,
+ 110, 112, 114, 116, 119, 122, 125, 128,
+ 131, 134, 137, 140, 143, 146, 149, 152,
+ 155, 158, 161, 164, 167, 170, 173, 177,
+ 181, 185, 189, 193, 197, 201, 205, 209,
+ 213, 217, 221, 225, 229, 234, 239, 245,
+ 249, 254, 259, 264, 269, 274, 279, 284
+};
+
+static const uint16_t kAcTable2[128] = {
+ 8, 8, 9, 10, 12, 13, 15, 17,
+ 18, 20, 21, 23, 24, 26, 27, 29,
+ 31, 32, 34, 35, 37, 38, 40, 41,
+ 43, 44, 46, 48, 49, 51, 52, 54,
+ 55, 57, 58, 60, 62, 63, 65, 66,
+ 68, 69, 71, 72, 74, 75, 77, 79,
+ 80, 82, 83, 85, 86, 88, 89, 93,
+ 96, 99, 102, 105, 108, 111, 114, 117,
+ 120, 124, 127, 130, 133, 136, 139, 142,
+ 145, 148, 151, 155, 158, 161, 164, 167,
+ 170, 173, 176, 179, 184, 189, 193, 198,
+ 203, 207, 212, 217, 221, 226, 230, 235,
+ 240, 244, 249, 254, 258, 263, 268, 274,
+ 280, 286, 292, 299, 305, 311, 317, 323,
+ 330, 336, 342, 348, 354, 362, 370, 379,
+ 385, 393, 401, 409, 416, 424, 432, 440
+};
+
+static const uint16_t kCoeffThresh[16] = {
+ 0, 10, 20, 30,
+ 10, 20, 30, 30,
+ 20, 30, 30, 30,
+ 30, 30, 30, 30
+};
+
+// TODO(skal): tune more. Coeff thresholding?
+static const uint8_t kBiasMatrices[3][16] = { // [3] = [luma-ac,luma-dc,chroma]
+ { 96, 96, 96, 96,
+ 96, 96, 96, 96,
+ 96, 96, 96, 96,
+ 96, 96, 96, 96 },
+ { 96, 96, 96, 96,
+ 96, 96, 96, 96,
+ 96, 96, 96, 96,
+ 96, 96, 96, 96 },
+ { 96, 96, 96, 96,
+ 96, 96, 96, 96,
+ 96, 96, 96, 96,
+ 96, 96, 96, 96 }
+};
+
+// Sharpening by (slightly) raising the hi-frequency coeffs (only for trellis).
+// Hack-ish but helpful for mid-bitrate range. Use with care.
+static const uint8_t kFreqSharpening[16] = {
+ 0, 30, 60, 90,
+ 30, 60, 90, 90,
+ 60, 90, 90, 90,
+ 90, 90, 90, 90
+};
+
+//-----------------------------------------------------------------------------
+// Initialize quantization parameters in VP8Matrix
+
+// Returns the average quantizer
+static int ExpandMatrix(VP8Matrix* const m, int type) {
+ int i;
+ int sum = 0;
+ for (i = 2; i < 16; ++i) {
+ m->q_[i] = m->q_[1];
+ }
+ for (i = 0; i < 16; ++i) {
+ const int j = VP8Zigzag[i];
+ const int bias = kBiasMatrices[type][j];
+ m->iq_[j] = (1 << QFIX) / m->q_[j];
+ m->bias_[j] = BIAS(bias);
+ // TODO(skal): tune kCoeffThresh[]
+ m->zthresh_[j] = ((256 /*+ kCoeffThresh[j]*/ - bias) * m->q_[j] + 127) >> 8;
+ m->sharpen_[j] = (kFreqSharpening[j] * m->q_[j]) >> 11;
+ sum += m->q_[j];
+ }
+ return (sum + 8) >> 4;
+}
+
+static void SetupMatrices(VP8Encoder* enc) {
+ int i;
+ const int tlambda_scale =
+ (enc->method_ >= 4) ? enc->config_->sns_strength
+ : 0;
+ const int num_segments = enc->segment_hdr_.num_segments_;
+ for (i = 0; i < num_segments; ++i) {
+ VP8SegmentInfo* const m = &enc->dqm_[i];
+ const int q = m->quant_;
+ int q4, q16, quv;
+ m->y1_.q_[0] = kDcTable[clip(q + enc->dq_y1_dc_, 0, 127)];
+ m->y1_.q_[1] = kAcTable[clip(q, 0, 127)];
+
+ m->y2_.q_[0] = kDcTable[ clip(q + enc->dq_y2_dc_, 0, 127)] * 2;
+ m->y2_.q_[1] = kAcTable2[clip(q + enc->dq_y2_ac_, 0, 127)];
+
+ m->uv_.q_[0] = kDcTable[clip(q + enc->dq_uv_dc_, 0, 117)];
+ m->uv_.q_[1] = kAcTable[clip(q + enc->dq_uv_ac_, 0, 127)];
+
+ q4 = ExpandMatrix(&m->y1_, 0);
+ q16 = ExpandMatrix(&m->y2_, 1);
+ quv = ExpandMatrix(&m->uv_, 2);
+
+ // TODO: Switch to kLambda*[] tables?
+ {
+ m->lambda_i4_ = (3 * q4 * q4) >> 7;
+ m->lambda_i16_ = (3 * q16 * q16);
+ m->lambda_uv_ = (3 * quv * quv) >> 6;
+ m->lambda_mode_ = (1 * q4 * q4) >> 7;
+ m->lambda_trellis_i4_ = (7 * q4 * q4) >> 3;
+ m->lambda_trellis_i16_ = (q16 * q16) >> 2;
+ m->lambda_trellis_uv_ = (quv * quv) << 1;
+ m->tlambda_ = (tlambda_scale * q4) >> 5;
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Initialize filtering parameters
+
+// Very small filter-strength values have close to no visual effect. So we can
+// save a little decoding-CPU by turning filtering off for these.
+#define FSTRENGTH_CUTOFF 3
+
+static void SetupFilterStrength(VP8Encoder* const enc) {
+ int i;
+ const int level0 = enc->config_->filter_strength;
+ for (i = 0; i < NUM_MB_SEGMENTS; ++i) {
+ // Segments with lower quantizer will be less filtered. TODO: tune (wrt SNS)
+ const int level = level0 * 256 * enc->dqm_[i].quant_ / 128;
+ const int f = level / (256 + enc->dqm_[i].beta_);
+ enc->dqm_[i].fstrength_ = (f < FSTRENGTH_CUTOFF) ? 0 : (f > 63) ? 63 : f;
+ }
+ // We record the initial strength (mainly for the case of 1-segment only).
+ enc->filter_hdr_.level_ = enc->dqm_[0].fstrength_;
+ enc->filter_hdr_.simple_ = (enc->config_->filter_type == 0);
+ enc->filter_hdr_.sharpness_ = enc->config_->filter_sharpness;
+}
+
+//-----------------------------------------------------------------------------
+
+// Note: if you change the values below, remember that the max range
+// allowed by the syntax for DQ_UV is [-16,16].
+#define MAX_DQ_UV (6)
+#define MIN_DQ_UV (-4)
+
+// We want to emulate jpeg-like behaviour where the expected "good" quality
+// is around q=75. Internally, our "good" middle is around c=50. So we
+// map accordingly using a linear piece-wise function:
+static double QualityToCompression(double q) {
+ const double c = q / 100.;
+ return (c < 0.75) ? c * (2. / 3.) : 2. * c - 1.;
+}
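+// For instance, quality 75 gives c = 2 * 0.75 - 1 = 0.5 and quality 30 gives
+// c = 0.30 * 2 / 3 = 0.2; for a neutral segment (alpha_ == 0) the former then
+// maps to quant ~= 127 * (1 - pow(0.5, 1. / 3.)) ~= 26 in
+// VP8SetSegmentParams() below.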
+
+void VP8SetSegmentParams(VP8Encoder* const enc, float quality) {
+ int i;
+ int dq_uv_ac, dq_uv_dc;
+ const int num_segments = enc->config_->segments;
+ const double amp = SNS_TO_DQ * enc->config_->sns_strength / 100. / 128.;
+ const double c_base = QualityToCompression(quality);
+ for (i = 0; i < num_segments; ++i) {
+ // The file size roughly scales as pow(quantizer, 3.). Actually, the
+ // exponent is somewhere between 2.8 and 3.2, but we're mostly interested
+ // in the mid-quant range. So we scale the compressibility inversely to
+ // this power-law: quant ~= compression ^ 1/3. This law holds well for
+ // low quant. Finer modelling for high-quant would make use of kAcTable[]
+ // more explicitly.
+ // Additionally, we modulate the base exponent 1/3 to account for the
+ // quantization susceptibility and allow denser segments to be quantized
+ // more.
+ const double expn = (1. - amp * enc->dqm_[i].alpha_) / 3.;
+ const double c = pow(c_base, expn);
+ const int q = (int)(127. * (1. - c));
+ assert(expn > 0.);
+ enc->dqm_[i].quant_ = clip(q, 0, 127);
+ }
+
+ // purely indicative in the bitstream (except for the 1-segment case)
+ enc->base_quant_ = enc->dqm_[0].quant_;
+
+ // fill-in values for the unused segments (required by the syntax)
+ for (i = num_segments; i < NUM_MB_SEGMENTS; ++i) {
+ enc->dqm_[i].quant_ = enc->base_quant_;
+ }
+
+ // uv_alpha_ is normally spread around ~60. The useful range is
+ // typically ~30 (quite bad) to ~100 (ok to decimate UV more).
+ // We map it to the safe maximal range of MAX/MIN_DQ_UV for dq_uv.
+ dq_uv_ac = (enc->uv_alpha_ - MID_ALPHA) * (MAX_DQ_UV - MIN_DQ_UV)
+ / (MAX_ALPHA - MIN_ALPHA);
+ // we rescale by the user-defined strength of adaptation
+ dq_uv_ac = dq_uv_ac * enc->config_->sns_strength / 100;
+ // and make it safe.
+ dq_uv_ac = clip(dq_uv_ac, MIN_DQ_UV, MAX_DQ_UV);
+ // We also boost the dc-uv-quant a little, based on sns-strength, since
+ // U/V channels are much more reactive to high quants (flat DC-blocks
+ // tend to appear, and are unpleasant).
+ dq_uv_dc = -4 * enc->config_->sns_strength / 100;
+ dq_uv_dc = clip(dq_uv_dc, -15, 15); // 4bit-signed max allowed
+
+ enc->dq_y1_dc_ = 0; // TODO(skal): dq-lum
+ enc->dq_y2_dc_ = 0;
+ enc->dq_y2_ac_ = 0;
+ enc->dq_uv_dc_ = dq_uv_dc;
+ enc->dq_uv_ac_ = dq_uv_ac;
+
+ SetupMatrices(enc);
+
+ SetupFilterStrength(enc); // initialize the segments' filtering, if needed
+}
+
+//-----------------------------------------------------------------------------
+// Form the predictions in cache
+
+// Must be ordered using {DC_PRED, TM_PRED, V_PRED, H_PRED} as index
+const int VP8I16ModeOffsets[4] = { I16DC16, I16TM16, I16VE16, I16HE16 };
+const int VP8UVModeOffsets[4] = { C8DC8, C8TM8, C8VE8, C8HE8 };
+
+// Must be indexed using {B_DC_PRED -> B_HU_PRED} as index
+const int VP8I4ModeOffsets[NUM_BMODES] = {
+ I4DC4, I4TM4, I4VE4, I4HE4, I4RD4, I4VR4, I4LD4, I4VL4, I4HD4, I4HU4
+};
+
+void VP8MakeLuma16Preds(const VP8EncIterator* const it) {
+ VP8Encoder* const enc = it->enc_;
+ const uint8_t* left = it->x_ ? enc->y_left_ : NULL;
+ const uint8_t* top = it->y_ ? enc->y_top_ + it->x_ * 16 : NULL;
+ VP8EncPredLuma16(it->yuv_p_, left, top);
+}
+
+void VP8MakeChroma8Preds(const VP8EncIterator* const it) {
+ VP8Encoder* const enc = it->enc_;
+ const uint8_t* left = it->x_ ? enc->u_left_ : NULL;
+ const uint8_t* top = it->y_ ? enc->uv_top_ + it->x_ * 16 : NULL;
+ VP8EncPredChroma8(it->yuv_p_, left, top);
+}
+
+void VP8MakeIntra4Preds(const VP8EncIterator* const it) {
+ VP8EncPredLuma4(it->yuv_p_, it->i4_top_);
+}
+
+//-----------------------------------------------------------------------------
+// Quantize
+
+// Layout:
+// +----+
+// |YYYY| 0
+// |YYYY| 4
+// |YYYY| 8
+// |YYYY| 12
+// +----+
+// |UUVV| 16
+// |UUVV| 20
+// +----+
+
+const int VP8Scan[16 + 4 + 4] = {
+ // Luma
+ 0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
+ 0 + 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS,
+ 0 + 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS,
+ 0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS,
+
+ 0 + 0 * BPS, 4 + 0 * BPS, 0 + 4 * BPS, 4 + 4 * BPS, // U
+ 8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V
+};
+
+//-----------------------------------------------------------------------------
+// Distortion measurement
+
+static const uint16_t kWeightY[16] = {
+ 38, 32, 20, 9, 32, 28, 17, 7, 20, 17, 10, 4, 9, 7, 4, 2
+};
+
+static const uint16_t kWeightTrellis[16] = {
+#if USE_TDISTO == 0
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+#else
+ 30, 27, 19, 11,
+ 27, 24, 17, 10,
+ 19, 17, 12, 8,
+ 11, 10, 8, 6
+#endif
+};
+
+// Init/Copy the common fields in score.
+static void InitScore(VP8ModeScore* const rd) {
+ rd->D = 0;
+ rd->SD = 0;
+ rd->R = 0;
+ rd->nz = 0;
+ rd->score = MAX_COST;
+}
+
+static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
+ dst->D = src->D;
+ dst->SD = src->SD;
+ dst->R = src->R;
+ dst->nz = src->nz; // note that nz is not accumulated, but just copied.
+ dst->score = src->score;
+}
+
+static void AddScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
+ dst->D += src->D;
+ dst->SD += src->SD;
+ dst->R += src->R;
+ dst->nz |= src->nz; // here, new nz bits are accumulated.
+ dst->score += src->score;
+}
+
+//-----------------------------------------------------------------------------
+// Performs trellis-optimized quantization.
+
+// Trellis
+
+typedef struct {
+ int prev; // best previous
+ int level; // level
+ int sign; // sign of coeff_i
+ score_t cost; // bit cost
+ score_t error; // distortion = sum of (|coeff_i| - level_i * Q_i)^2
+ int ctx; // context (only depends on 'level'. Could be spared.)
+} Node;
+
+// If a coefficient was quantized to a value Q (using a neutral bias),
+// we test all alternate possibilities between [Q-MIN_DELTA, Q+MAX_DELTA].
+// We don't test negative values, though.
+#define MIN_DELTA 0 // how much lower level to try
+#define MAX_DELTA 1 // how much higher
+#define NUM_NODES (MIN_DELTA + 1 + MAX_DELTA)
+#define NODE(n, l) (nodes[(n) + 1][(l) + MIN_DELTA])
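+
+// Example: if the neutral-bias quantization below yields level0 == 4, the
+// nodes at that position try levels 4 and 5 only (m spans [-MIN_DELTA,
+// MAX_DELTA] == [0, 1]).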
+
+static inline void SetRDScore(int lambda, VP8ModeScore* const rd) {
+ // TODO: incorporate the "* 256" in the tables?
+ rd->score = rd->R * lambda + 256 * (rd->D + rd->SD);
+}
+
+static inline score_t RDScoreTrellis(int lambda, score_t rate,
+ score_t distortion) {
+ return rate * lambda + 256 * distortion;
+}
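+
+// Note on units: the rate terms are accumulated in roughly 1/256-bit units
+// (see cost.h), while D/SD are plain SSE sums; the 256 factor above brings
+// distortion onto the same fixed-point scale before mixing with lambda.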
+
+static int TrellisQuantizeBlock(const VP8EncIterator* const it,
+ int16_t in[16], int16_t out[16],
+ int ctx0, int coeff_type,
+ const VP8Matrix* const mtx,
+ int lambda) {
+ ProbaArray* const last_costs = it->enc_->proba_.coeffs_[coeff_type];
+ CostArray* const costs = it->enc_->proba_.level_cost_[coeff_type];
+ const int first = (coeff_type == 0) ? 1 : 0;
+ Node nodes[17][NUM_NODES];
+ int best_path[3] = {-1, -1, -1}; // store best-last/best-level/best-previous
+ score_t best_score;
+ int best_node;
+ int last = first - 1;
+ int n, m, p, nz;
+
+ {
+ score_t cost;
+ score_t max_error;
+ const int thresh = mtx->q_[1] * mtx->q_[1] / 4;
+ const int last_proba = last_costs[VP8EncBands[first]][ctx0][0];
+
+ // compute maximal distortion.
+ max_error = 0;
+ for (n = first; n < 16; ++n) {
+ const int j = VP8Zigzag[n];
+ const int err = in[j] * in[j];
+ max_error += kWeightTrellis[j] * err;
+ if (err > thresh) last = n;
+ }
+ // we don't need to inspect coefficients all the way up to n = 16. We can
+ // just go up to last + 1 (inclusive) without losing much.
+ if (last < 15) ++last;
+
+ // compute 'skip' score. This is the max score one can do.
+ cost = VP8BitCost(0, last_proba);
+ best_score = RDScoreTrellis(lambda, cost, max_error);
+
+ // initialize source node.
+ n = first - 1;
+ for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) {
+ NODE(n, m).cost = 0;
+ NODE(n, m).error = max_error;
+ NODE(n, m).ctx = ctx0;
+ }
+ }
+
+ // traverse trellis.
+ for (n = first; n <= last; ++n) {
+ const int j = VP8Zigzag[n];
+ const int Q = mtx->q_[j];
+ const int iQ = mtx->iq_[j];
+ const int B = BIAS(0x00); // neutral bias
+ // note: it's important to take sign of the _original_ coeff,
+ // so we don't have to consider level < 0 afterward.
+ const int sign = (in[j] < 0);
+ int coeff0 = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
+ int level0;
+ if (coeff0 > 2047) coeff0 = 2047;
+
+ level0 = QUANTDIV(coeff0, iQ, B);
+ // test all alternate level values around level0.
+ for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) {
+ Node* const cur = &NODE(n, m);
+ int delta_error, new_error;
+ score_t cur_score = MAX_COST;
+ int level = level0 + m;
+ int last_proba;
+
+ cur->sign = sign;
+ cur->level = level;
+ cur->ctx = (level == 0) ? 0 : (level == 1) ? 1 : 2;
+ if (level >= 2048 || level < 0) { // node is dead?
+ cur->cost = MAX_COST;
+ continue;
+ }
+ last_proba = last_costs[VP8EncBands[n + 1]][cur->ctx][0];
+
+ // Compute delta_error = how much coding this level will
+ // subtract as distortion to max_error
+ new_error = coeff0 - level * Q;
+ delta_error =
+ kWeightTrellis[j] * (coeff0 * coeff0 - new_error * new_error);
+
+ // Inspect all possible non-dead predecessors. Retain only the best one.
+ for (p = -MIN_DELTA; p <= MAX_DELTA; ++p) {
+ const Node* const prev = &NODE(n - 1, p);
+ const int prev_ctx = prev->ctx;
+ const uint16_t* const tcost = costs[VP8EncBands[n]][prev_ctx];
+ const score_t total_error = prev->error - delta_error;
+ score_t cost, base_cost, score;
+
+ if (prev->cost >= MAX_COST) { // dead node?
+ continue;
+ }
+
+ // Base cost of both terminal/non-terminal
+ base_cost = prev->cost + VP8LevelCost(tcost, level);
+
+ // Examine node assuming it's a non-terminal one.
+ cost = base_cost;
+ if (level && n < 15) {
+ cost += VP8BitCost(1, last_proba);
+ }
+ score = RDScoreTrellis(lambda, cost, total_error);
+ if (score < cur_score) {
+ cur_score = score;
+ cur->cost = cost;
+ cur->error = total_error;
+ cur->prev = p;
+ }
+
+ // Now, record best terminal node (and thus best entry in the graph).
+ if (level) {
+ cost = base_cost;
+ if (n < 15) cost += VP8BitCost(0, last_proba);
+ score = RDScoreTrellis(lambda, cost, total_error);
+ if (score < best_score) {
+ best_score = score;
+ best_path[0] = n; // best eob position
+ best_path[1] = m; // best level
+ best_path[2] = p; // best predecessor
+ }
+ }
+ }
+ }
+ }
+
+ // Fresh start
+ memset(in + first, 0, (16 - first) * sizeof(*in));
+ memset(out + first, 0, (16 - first) * sizeof(*out));
+ if (best_path[0] == -1) {
+ return 0; // skip!
+ }
+
+ // Unwind the best path.
+ // Note: best-prev on terminal node is not necessarily equal to the
+ // best_prev for non-terminal. So we patch best_path[2] in.
+ n = best_path[0];
+ best_node = best_path[1];
+ NODE(n, best_node).prev = best_path[2]; // force best-prev for terminal
+ nz = 0;
+
+ for (; n >= first; --n) {
+ const Node* const node = &NODE(n, best_node);
+ const int j = VP8Zigzag[n];
+ out[n] = node->sign ? -node->level : node->level;
+ nz |= (node->level != 0);
+ in[j] = out[n] * mtx->q_[j];
+ best_node = node->prev;
+ }
+ return nz;
+}
+
+#undef NODE
+
+//-----------------------------------------------------------------------------
+// Performs: difference, transform, quantize, back-transform, add
+// all at once. Output is the reconstructed block in *yuv_out, and the
+// quantized levels in *levels.
+
+static int ReconstructIntra16(VP8EncIterator* const it,
+ VP8ModeScore* const rd,
+ uint8_t* const yuv_out,
+ int mode) {
+ const VP8Encoder* const enc = it->enc_;
+ const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
+ const uint8_t* const src = it->yuv_in_ + Y_OFF;
+ const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
+ int nz = 0;
+ int n;
+ int16_t tmp[16][16], dc_tmp[16];
+
+ for (n = 0; n < 16; ++n) {
+ VP8FTransform(src + VP8Scan[n], ref + VP8Scan[n], tmp[n]);
+ }
+ VP8FTransformWHT(tmp[0], dc_tmp);
+ nz |= VP8EncQuantizeBlock(dc_tmp, rd->y_dc_levels, 0, &dqm->y2_) << 24;
+
+ if (DO_TRELLIS_I16 && it->do_trellis_) {
+ int x, y;
+ VP8IteratorNzToBytes(it);
+ for (y = 0, n = 0; y < 4; ++y) {
+ for (x = 0; x < 4; ++x, ++n) {
+ const int ctx = it->top_nz_[x] + it->left_nz_[y];
+ const int non_zero =
+ TrellisQuantizeBlock(it, tmp[n], rd->y_ac_levels[n], ctx, 0,
+ &dqm->y1_, dqm->lambda_trellis_i16_);
+ it->top_nz_[x] = it->left_nz_[y] = non_zero;
+ nz |= non_zero << n;
+ }
+ }
+ } else {
+ for (n = 0; n < 16; ++n) {
+ nz |= VP8EncQuantizeBlock(tmp[n], rd->y_ac_levels[n], 1, &dqm->y1_) << n;
+ }
+ }
+
+ // Transform back
+ VP8ITransformWHT(dc_tmp, tmp[0]);
+ for (n = 0; n < 16; ++n) {
+ VP8ITransform(ref + VP8Scan[n], tmp[n], yuv_out + VP8Scan[n]);
+ }
+
+ return nz;
+}
+
+static int ReconstructIntra4(VP8EncIterator* const it,
+ int16_t levels[16],
+ const uint8_t* const src,
+ uint8_t* const yuv_out,
+ int mode) {
+ const VP8Encoder* const enc = it->enc_;
+ const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
+ const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
+ int nz = 0;
+ int16_t tmp[16];
+
+ VP8FTransform(src, ref, tmp);
+ if (DO_TRELLIS_I4 && it->do_trellis_) {
+ const int x = it->i4_ & 3, y = it->i4_ >> 2;
+ const int ctx = it->top_nz_[x] + it->left_nz_[y];
+ nz = TrellisQuantizeBlock(it, tmp, levels, ctx, 3, &dqm->y1_,
+ dqm->lambda_trellis_i4_);
+ } else {
+ nz = VP8EncQuantizeBlock(tmp, levels, 0, &dqm->y1_);
+ }
+ VP8ITransform(ref, tmp, yuv_out);
+ return nz;
+}
+
+static int ReconstructUV(VP8EncIterator* const it, VP8ModeScore* const rd,
+ uint8_t* const yuv_out, int mode) {
+ const VP8Encoder* const enc = it->enc_;
+ const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
+ const uint8_t* const src = it->yuv_in_ + U_OFF;
+ const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
+ int nz = 0;
+ int n;
+ int16_t tmp[8][16];
+
+ for (n = 0; n < 8; ++n) {
+ VP8FTransform(src + VP8Scan[16 + n], ref + VP8Scan[16 + n], tmp[n]);
+ }
+ if (DO_TRELLIS_UV && it->do_trellis_) {
+ int ch, x, y;
+ for (ch = 0, n = 0; ch <= 2; ch += 2) {
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x, ++n) {
+ const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
+ const int non_zero =
+ TrellisQuantizeBlock(it, tmp[n], rd->uv_levels[n], ctx, 2, &dqm->uv_,
+ dqm->lambda_trellis_uv_);
+ it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = non_zero;
+ nz |= non_zero << n;
+ }
+ }
+ }
+ } else {
+ for (n = 0; n < 8; ++n) {
+ nz |= VP8EncQuantizeBlock(tmp[n], rd->uv_levels[n], 0, &dqm->uv_) << n;
+ }
+ }
+
+ for (n = 0; n < 8; ++n) {
+ VP8ITransform(ref + VP8Scan[16 + n], tmp[n], yuv_out + VP8Scan[16 + n]);
+ }
+ return (nz << 16);
+}
+
+//-----------------------------------------------------------------------------
+// RD-opt decision. Reconstruct each mode, evaluate distortion and bit-cost.
+// Pick the mode with the lower RD-cost = Rate + lambda * Distortion.
+
+static void SwapPtr(uint8_t** a, uint8_t** b) {
+ uint8_t* const tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
+static void SwapOut(VP8EncIterator* const it) {
+ SwapPtr(&it->yuv_out_, &it->yuv_out2_);
+}
+
+static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* const rd) {
+ VP8Encoder* const enc = it->enc_;
+ const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
+ const int lambda = dqm->lambda_i16_;
+ const int tlambda = dqm->tlambda_;
+ const uint8_t* const src = it->yuv_in_ + Y_OFF;
+ VP8ModeScore rd16;
+ int mode;
+
+ rd->mode_i16 = -1;
+ for (mode = 0; mode < 4; ++mode) {
+ uint8_t* const tmp_dst = it->yuv_out2_ + Y_OFF; // scratch buffer
+ int nz;
+
+ // Reconstruct
+ nz = ReconstructIntra16(it, &rd16, tmp_dst, mode);
+
+ // Measure RD-score
+ rd16.D = VP8SSE16x16(src, tmp_dst);
+ rd16.SD = tlambda ? MULT_8B(tlambda, VP8TDisto16x16(src, tmp_dst, kWeightY))
+ : 0;
+ rd16.R = VP8GetCostLuma16(it, &rd16);
+ rd16.R += VP8FixedCostsI16[mode];
+
+ // Since we always examine Intra16 first, we can overwrite *rd directly.
+ SetRDScore(lambda, &rd16);
+ if (mode == 0 || rd16.score < rd->score) {
+ CopyScore(rd, &rd16);
+ rd->mode_i16 = mode;
+ rd->nz = nz;
+ memcpy(rd->y_ac_levels, rd16.y_ac_levels, sizeof(rd16.y_ac_levels));
+ memcpy(rd->y_dc_levels, rd16.y_dc_levels, sizeof(rd16.y_dc_levels));
+ SwapOut(it);
+ }
+ }
+ SetRDScore(dqm->lambda_mode_, rd); // finalize score for mode decision.
+ VP8SetIntra16Mode(it, rd->mode_i16);
+}
+
+//-----------------------------------------------------------------------------
+
+// return the cost array corresponding to the surrounding prediction modes.
+static const uint16_t* GetCostModeI4(VP8EncIterator* const it,
+ const int modes[16]) {
+ const int preds_w = it->enc_->preds_w_;
+ const int x = (it->i4_ & 3), y = it->i4_ >> 2;
+ const int left = (x == 0) ? it->preds_[y * preds_w - 1] : modes[it->i4_ - 1];
+ const int top = (y == 0) ? it->preds_[-preds_w + x] : modes[it->i4_ - 4];
+ return VP8FixedCostsI4[top][left];
+}
+
+static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
+ VP8Encoder* const enc = it->enc_;
+ const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
+ const int lambda = dqm->lambda_i4_;
+ const int tlambda = dqm->tlambda_;
+ const uint8_t* const src0 = it->yuv_in_ + Y_OFF;
+ uint8_t* const best_blocks = it->yuv_out2_ + Y_OFF;
+ VP8ModeScore rd_best;
+
+ InitScore(&rd_best);
+ rd_best.score = 0;
+ VP8IteratorStartI4(it);
+ do {
+ VP8ModeScore rd_i4;
+ int mode;
+ int best_mode = -1;
+ const uint8_t* const src = src0 + VP8Scan[it->i4_];
+ const uint16_t* const mode_costs = GetCostModeI4(it, rd->modes_i4);
+ uint8_t* best_block = best_blocks + VP8Scan[it->i4_];
+ uint8_t* tmp_dst = it->yuv_p_ + I4TMP; // scratch buffer.
+
+ InitScore(&rd_i4);
+ VP8MakeIntra4Preds(it);
+ for (mode = 0; mode < NUM_BMODES; ++mode) {
+ VP8ModeScore rd_tmp;
+ int16_t tmp_levels[16];
+
+ // Reconstruct
+ rd_tmp.nz =
+ ReconstructIntra4(it, tmp_levels, src, tmp_dst, mode) << it->i4_;
+
+ // Compute RD-score
+ rd_tmp.D = VP8SSE4x4(src, tmp_dst);
+ rd_tmp.SD =
+ tlambda ? MULT_8B(tlambda, VP8TDisto4x4(src, tmp_dst, kWeightY))
+ : 0;
+ rd_tmp.R = VP8GetCostLuma4(it, tmp_levels);
+ rd_tmp.R += mode_costs[mode];
+
+ SetRDScore(lambda, &rd_tmp);
+ if (best_mode < 0 || rd_tmp.score < rd_i4.score) {
+ CopyScore(&rd_i4, &rd_tmp);
+ best_mode = mode;
+ SwapPtr(&tmp_dst, &best_block);
+ memcpy(rd_best.y_ac_levels[it->i4_], tmp_levels, sizeof(tmp_levels));
+ }
+ }
+ SetRDScore(dqm->lambda_mode_, &rd_i4);
+ AddScore(&rd_best, &rd_i4);
+ if (rd_best.score >= rd->score) {
+ return 0;
+ }
+ // Copy selected samples if not in the right place already.
+ if (best_block != best_blocks + VP8Scan[it->i4_])
+ VP8Copy4x4(best_block, best_blocks + VP8Scan[it->i4_]);
+ rd->modes_i4[it->i4_] = best_mode;
+ it->top_nz_[it->i4_ & 3] = it->left_nz_[it->i4_ >> 2] = (rd_i4.nz ? 1 : 0);
+ } while (VP8IteratorRotateI4(it, best_blocks));
+
+ // finalize state
+ CopyScore(rd, &rd_best);
+ VP8SetIntra4Mode(it, rd->modes_i4);
+ SwapOut(it);
+ memcpy(rd->y_ac_levels, rd_best.y_ac_levels, sizeof(rd->y_ac_levels));
+ return 1; // select intra4x4 over intra16x16
+}
+
+//-----------------------------------------------------------------------------
+
+static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
+ VP8Encoder* const enc = it->enc_;
+ const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
+ const int lambda = dqm->lambda_uv_;
+ const uint8_t* const src = it->yuv_in_ + U_OFF;
+ uint8_t* const tmp_dst = it->yuv_out2_ + U_OFF; // scratch buffer
+ uint8_t* const dst0 = it->yuv_out_ + U_OFF;
+ VP8ModeScore rd_best;
+ int mode;
+
+ rd->mode_uv = -1;
+ InitScore(&rd_best);
+ for (mode = 0; mode < 4; ++mode) {
+ VP8ModeScore rd_uv;
+
+ // Reconstruct
+ rd_uv.nz = ReconstructUV(it, &rd_uv, tmp_dst, mode);
+
+ // Compute RD-score
+ rd_uv.D = VP8SSE16x8(src, tmp_dst);
+ rd_uv.SD = 0; // TODO: should we call TDisto? it tends to flatten areas.
+ rd_uv.R = VP8GetCostUV(it, &rd_uv);
+ rd_uv.R += VP8FixedCostsUV[mode];
+
+ SetRDScore(lambda, &rd_uv);
+ if (mode == 0 || rd_uv.score < rd_best.score) {
+ CopyScore(&rd_best, &rd_uv);
+ rd->mode_uv = mode;
+ memcpy(rd->uv_levels, rd_uv.uv_levels, sizeof(rd->uv_levels));
+ memcpy(dst0, tmp_dst, UV_SIZE); // TODO: SwapUVOut() ?
+ }
+ }
+ VP8SetIntraUVMode(it, rd->mode_uv);
+ AddScore(rd, &rd_best);
+}
+
+//-----------------------------------------------------------------------------
+// Final reconstruction and quantization.
+
+static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) {
+ const VP8Encoder* const enc = it->enc_;
+ const int i16 = (it->mb_->type_ == 1);
+ int nz = 0;
+
+ if (i16) {
+ nz = ReconstructIntra16(it, rd, it->yuv_out_ + Y_OFF, it->preds_[0]);
+ } else {
+ VP8IteratorStartI4(it);
+ do {
+ const int mode =
+ it->preds_[(it->i4_ & 3) + (it->i4_ >> 2) * enc->preds_w_];
+ const uint8_t* const src = it->yuv_in_ + Y_OFF + VP8Scan[it->i4_];
+ uint8_t* const dst = it->yuv_out_ + Y_OFF + VP8Scan[it->i4_];
+ VP8MakeIntra4Preds(it);
+ nz |= ReconstructIntra4(it, rd->y_ac_levels[it->i4_],
+ src, dst, mode) << it->i4_;
+ } while (VP8IteratorRotateI4(it, it->yuv_out_ + Y_OFF));
+ }
+
+ nz |= ReconstructUV(it, rd, it->yuv_out_ + U_OFF, it->mb_->uv_mode_);
+ rd->nz = nz;
+}
+
+//-----------------------------------------------------------------------------
+// Entry point
+
+int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd, int rd_opt) {
+ int is_skipped;
+
+ InitScore(rd);
+
+ // We can perform predictions for Luma16x16 and Chroma8x8 already.
+ // Luma4x4 predictions needs to be done as-we-go.
+ VP8MakeLuma16Preds(it);
+ VP8MakeChroma8Preds(it);
+
+ // for rd_opt = 2, we perform trellis-quant on the final decision only.
+ // for rd_opt > 2, we use it for every scoring (=much slower).
+ if (rd_opt > 0) {
+ it->do_trellis_ = (rd_opt > 2);
+ PickBestIntra16(it, rd);
+ if (it->enc_->method_ >= 2) {
+ PickBestIntra4(it, rd);
+ }
+ PickBestUV(it, rd);
+ if (rd_opt == 2) {
+ it->do_trellis_ = 1;
+ SimpleQuantize(it, rd);
+ }
+ } else {
+ // TODO: for method_ == 2, pick the best intra4/intra16 based on SSE
+ it->do_trellis_ = (it->enc_->method_ == 2);
+ SimpleQuantize(it, rd);
+ }
+ is_skipped = (rd->nz == 0);
+ VP8SetSkip(it, is_skipped);
+ return is_skipped;
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/enc/syntax.c b/src/enc/syntax.c
new file mode 100644
index 00000000..a788f3c3
--- /dev/null
+++ b/src/enc/syntax.c
@@ -0,0 +1,237 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Header syntax writing
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <math.h>
+
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#define KSIGNATURE 0x9d012a
+#define KHEADER_SIZE 10
+#define KRIFF_SIZE 20
+#define KSIZE_OFFSET (KRIFF_SIZE - 8)
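+// KSIZE_OFFSET is 12: the RIFF chunk size stored at offset 4 counts everything
+// past the initial 8-byte 'RIFF'+size prefix, i.e. the remaining 12 container
+// bytes plus the VP8 payload.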
+
+#define MAX_PARTITION0_SIZE (1 << 19) // max size of mode partition
+#define MAX_PARTITION_SIZE (1 << 24) // max size for token partition
+
+//-----------------------------------------------------------------------------
+// Writers for header's various pieces (in order of appearance)
+
+// Main keyframe header
+
+static void PutLE32(uint8_t* const data, uint32_t val) {
+ data[0] = (val >> 0) & 0xff;
+ data[1] = (val >> 8) & 0xff;
+ data[2] = (val >> 16) & 0xff;
+ data[3] = (val >> 24) & 0xff;
+}
+
+static int PutHeader(int profile, size_t size0, size_t total_size,
+ const WebPPicture* const pic) {
+ uint8_t buf[KHEADER_SIZE];
+ uint8_t RIFF[KRIFF_SIZE] = {
+ 'R', 'I', 'F', 'F', 0, 0, 0, 0, 'W', 'E', 'B', 'P', 'V', 'P', '8', ' '
+ };
+ uint32_t bits;
+
+ if (size0 >= MAX_PARTITION0_SIZE) {
+ return 0; // partition #0 is too big to fit
+ }
+
+ PutLE32(RIFF + 4, total_size + KSIZE_OFFSET);
+ PutLE32(RIFF + 16, total_size);
+ if (!pic->writer(RIFF, sizeof(RIFF), pic))
+ return 0;
+
+ bits = 0 // keyframe (1b)
+ | (profile << 1) // profile (3b)
+ | (1 << 4) // visible (1b)
+ | (size0 << 5); // partition length (19b)
+ buf[0] = bits & 0xff;
+ buf[1] = (bits >> 8) & 0xff;
+ buf[2] = (bits >> 16) & 0xff;
+ // signature
+ buf[3] = (KSIGNATURE >> 16) & 0xff;
+ buf[4] = (KSIGNATURE >> 8) & 0xff;
+ buf[5] = (KSIGNATURE >> 0) & 0xff;
+ // dimensions
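+ // (width and height are 14-bit values; the two top bits of each 16-bit
+ // field, reserved for scaling, are left at zero here)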
+ buf[6] = pic->width & 0xff;
+ buf[7] = pic->width >> 8;
+ buf[8] = pic->height & 0xff;
+ buf[9] = pic->height >> 8;
+
+ return pic->writer(buf, sizeof(buf), pic);
+}
+
+// Segmentation header
+static void PutSegmentHeader(VP8BitWriter* const bw,
+ const VP8Encoder* const enc) {
+ const VP8SegmentHeader* const hdr = &enc->segment_hdr_;
+ const VP8Proba* const proba = &enc->proba_;
+ if (VP8PutBitUniform(bw, (hdr->num_segments_ > 1))) {
+ // We always 'update' the quant and filter strength values
+ const int update_data = 1;
+ int s;
+ VP8PutBitUniform(bw, hdr->update_map_);
+ if (VP8PutBitUniform(bw, update_data)) {
+ // we always use absolute values, not relative ones
+ VP8PutBitUniform(bw, 1); // (segment_feature_mode = 1. Paragraph 9.3.)
+ for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
+ VP8PutSignedValue(bw, enc->dqm_[s].quant_, 7);
+ }
+ for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
+ VP8PutSignedValue(bw, enc->dqm_[s].fstrength_, 6);
+ }
+ }
+ if (hdr->update_map_) {
+ for (s = 0; s < 3; ++s) {
+ if (VP8PutBitUniform(bw, (proba->segments_[s] != 255u))) {
+ VP8PutValue(bw, proba->segments_[s], 8);
+ }
+ }
+ }
+ }
+}
+
+// Filtering parameters header
+static void PutFilterHeader(VP8BitWriter* const bw,
+ const VP8FilterHeader* const hdr) {
+ const int use_lf_delta = (hdr->i4x4_lf_delta_ != 0);
+ VP8PutBitUniform(bw, hdr->simple_);
+ VP8PutValue(bw, hdr->level_, 6);
+ VP8PutValue(bw, hdr->sharpness_, 3);
+ if (VP8PutBitUniform(bw, use_lf_delta)) {
+ // '0' is the default value for i4x4_lf_delta_ at frame #0.
+ const int need_update = (hdr->i4x4_lf_delta_ != 0);
+ if (VP8PutBitUniform(bw, need_update)) {
+ // we don't use ref_lf_delta => emit four 0 bits
+ VP8PutValue(bw, 0, 4);
+ // we use mode_lf_delta for i4x4
+ VP8PutSignedValue(bw, hdr->i4x4_lf_delta_, 6);
+ VP8PutValue(bw, 0, 3); // all others unused
+ }
+ }
+}
+
+// Nominal quantization parameters
+static void PutQuant(VP8BitWriter* const bw,
+ const VP8Encoder* const enc) {
+ VP8PutValue(bw, enc->base_quant_, 7);
+ VP8PutSignedValue(bw, enc->dq_y1_dc_, 4);
+ VP8PutSignedValue(bw, enc->dq_y2_dc_, 4);
+ VP8PutSignedValue(bw, enc->dq_y2_ac_, 4);
+ VP8PutSignedValue(bw, enc->dq_uv_dc_, 4);
+ VP8PutSignedValue(bw, enc->dq_uv_ac_, 4);
+}
+
+// Partition sizes
+static int EmitPartitionsSize(const VP8Encoder* const enc,
+ const WebPPicture* const pic) {
+ uint8_t buf[3 * (MAX_NUM_PARTITIONS - 1)];
+ int p;
+ for (p = 0; p < enc->num_parts_ - 1; ++p) {
+ const size_t part_size = VP8BitWriterSize(enc->parts_ + p);
+ if (part_size >= MAX_PARTITION_SIZE) {
+ return 0; // partition is too big to fit
+ }
+ buf[3 * p + 0] = (part_size >> 0) & 0xff;
+ buf[3 * p + 1] = (part_size >> 8) & 0xff;
+ buf[3 * p + 2] = (part_size >> 16) & 0xff;
+ }
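+ // Note: the size of the last partition is not transmitted; the decoder
+ // infers it from the remaining length of the bitstream.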
+ return p ? pic->writer(buf, 3 * p, pic) : 1;
+}
+
+//-----------------------------------------------------------------------------
+
+static size_t GeneratePartition0(VP8Encoder* const enc) {
+ VP8BitWriter* const bw = &enc->bw_;
+ const int mb_size = enc->mb_w_ * enc->mb_h_;
+ uint64_t pos1, pos2, pos3;
+
+ pos1 = VP8BitWriterPos(bw);
+ VP8BitWriterInit(bw, mb_size * 7 / 8); // ~7 bits per macroblock
+ VP8PutBitUniform(bw, 0); // colorspace
+ VP8PutBitUniform(bw, 0); // clamp type
+
+ PutSegmentHeader(bw, enc);
+ PutFilterHeader(bw, &enc->filter_hdr_);
+ VP8PutValue(bw, enc->config_->partitions, 2);
+ PutQuant(bw, enc);
+ VP8PutBitUniform(bw, 0); // no proba update
+ VP8WriteProbas(bw, &enc->proba_);
+ pos2 = VP8BitWriterPos(bw);
+ VP8CodeIntraModes(enc);
+ VP8BitWriterFinish(bw);
+ pos3 = VP8BitWriterPos(bw);
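+ // positions are measured in bits; (x + 7) >> 3 rounds them up to bytes.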
+
+ if (enc->pic_->stats) {
+ enc->pic_->stats->header_bytes[0] = (int)((pos2 - pos1 + 7) >> 3);
+ enc->pic_->stats->header_bytes[1] = (int)((pos3 - pos2 + 7) >> 3);
+ }
+ return !bw->error_;
+}
+
+int VP8EncWrite(VP8Encoder* const enc) {
+ WebPPicture* const pic = enc->pic_;
+ VP8BitWriter* const bw = &enc->bw_;
+ int ok = 0;
+ size_t coded_size, pad;
+ int p;
+
+ // Partition #0 with header and partition sizes
+ ok = GeneratePartition0(enc);
+
+ // Compute total size (for the RIFF header)
+ coded_size = KHEADER_SIZE + VP8BitWriterSize(bw) + 3 * (enc->num_parts_ - 1);
+ for (p = 0; p < enc->num_parts_; ++p) {
+ coded_size += VP8BitWriterSize(enc->parts_ + p);
+ }
+ pad = coded_size & 1;
+ coded_size += pad;
+
+ // Emit headers and partition #0
+ {
+ const uint8_t* const part0 = VP8BitWriterBuf(bw);
+ const size_t size0 = VP8BitWriterSize(bw);
+ ok = ok && PutHeader(enc->profile_, size0, coded_size, pic)
+ && pic->writer(part0, size0, pic)
+ && EmitPartitionsSize(enc, pic);
+ free((void*)part0);
+ }
+
+ // Token partitions
+ for (p = 0; p < enc->num_parts_; ++p) {
+ const uint8_t* const buf = VP8BitWriterBuf(enc->parts_ + p);
+ const size_t size = VP8BitWriterSize(enc->parts_ + p);
+ if (size)
+ ok = ok && pic->writer(buf, size, pic);
+ free((void*)buf);
+ }
+
+ // Padding byte
+ if (ok && pad) {
+ const uint8_t pad_byte[1] = { 0 };
+ ok = pic->writer(pad_byte, 1, pic);
+ }
+
+ enc->coded_size_ = coded_size + KRIFF_SIZE;
+ return ok;
+}
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/enc/tree.c b/src/enc/tree.c
new file mode 100644
index 00000000..b1a9aa40
--- /dev/null
+++ b/src/enc/tree.c
@@ -0,0 +1,507 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// Token probabilities
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "vp8enci.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// Default probabilities
+
+// Paragraph 13.5
+const uint8_t
+ VP8CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = {
+ // generated using vp8_default_coef_probs() in entropy.c:129
+ { { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
+ },
+ { { 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 },
+ { 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 },
+ { 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 }
+ },
+ { { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 },
+ { 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 },
+ { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 },
+ },
+ { { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 },
+ { 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 },
+ { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 },
+ },
+ { { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 },
+ { 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 },
+ { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 }
+ },
+ { { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 },
+ { 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 },
+ { 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 }
+ },
+ { { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 },
+ { 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 },
+ { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 }
+ },
+ { { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
+ }
+ },
+ { { { 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 },
+ { 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 },
+ { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 }
+ },
+ { { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 },
+ { 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 },
+ { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 }
+ },
+ { { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 },
+ { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 },
+ { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 }
+ },
+ { { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 },
+ { 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 },
+ { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 }
+ },
+ { { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 },
+ { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 },
+ { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 }
+ },
+ { { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 },
+ { 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 },
+ { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 }
+ },
+ { { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 },
+ { 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 },
+ { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 }
+ },
+ { { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 },
+ { 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 }
+ }
+ },
+ { { { 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 },
+ { 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 },
+ { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 }
+ },
+ { { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 },
+ { 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 },
+ { 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 }
+ },
+ { { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 },
+ { 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 },
+ { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 }
+ },
+ { { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 },
+ { 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 }
+ },
+ { { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 },
+ { 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
+ },
+ { { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
+ },
+ { { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 },
+ { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
+ },
+ { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
+ }
+ },
+ { { { 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 },
+ { 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 },
+ { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 }
+ },
+ { { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 },
+ { 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 },
+ { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 }
+ },
+ { { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 },
+ { 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 },
+ { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 }
+ },
+ { { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 },
+ { 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 },
+ { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 }
+ },
+ { { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 },
+ { 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 },
+ { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 }
+ },
+ { { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 },
+ { 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 },
+ { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 }
+ },
+ { { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 },
+ { 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 },
+ { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 }
+ },
+ { { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
+ { 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
+ }
+ }
+};
+
+void VP8DefaultProbas(VP8Encoder* const enc) {
+ VP8Proba* const probas = &enc->proba_;
+ memset(probas->segments_, 255u, sizeof(probas->segments_));
+ memcpy(probas->coeffs_, VP8CoeffsProba0, sizeof(VP8CoeffsProba0));
+ probas->use_skip_proba_ = 0;
+}
+
+// Paragraph 11.5. 900 bytes.
+static const uint8_t kBModesProba[NUM_BMODES][NUM_BMODES][NUM_BMODES - 1] = {
+ { { 231, 120, 48, 89, 115, 113, 120, 152, 112 },
+ { 152, 179, 64, 126, 170, 118, 46, 70, 95 },
+ { 175, 69, 143, 80, 85, 82, 72, 155, 103 },
+ { 56, 58, 10, 171, 218, 189, 17, 13, 152 },
+ { 114, 26, 17, 163, 44, 195, 21, 10, 173 },
+ { 121, 24, 80, 195, 26, 62, 44, 64, 85 },
+ { 144, 71, 10, 38, 171, 213, 144, 34, 26 },
+ { 170, 46, 55, 19, 136, 160, 33, 206, 71 },
+ { 63, 20, 8, 114, 114, 208, 12, 9, 226 },
+ { 81, 40, 11, 96, 182, 84, 29, 16, 36 } },
+ { { 134, 183, 89, 137, 98, 101, 106, 165, 148 },
+ { 72, 187, 100, 130, 157, 111, 32, 75, 80 },
+ { 66, 102, 167, 99, 74, 62, 40, 234, 128 },
+ { 41, 53, 9, 178, 241, 141, 26, 8, 107 },
+ { 74, 43, 26, 146, 73, 166, 49, 23, 157 },
+ { 65, 38, 105, 160, 51, 52, 31, 115, 128 },
+ { 104, 79, 12, 27, 217, 255, 87, 17, 7 },
+ { 87, 68, 71, 44, 114, 51, 15, 186, 23 },
+ { 47, 41, 14, 110, 182, 183, 21, 17, 194 },
+ { 66, 45, 25, 102, 197, 189, 23, 18, 22 } },
+ { { 88, 88, 147, 150, 42, 46, 45, 196, 205 },
+ { 43, 97, 183, 117, 85, 38, 35, 179, 61 },
+ { 39, 53, 200, 87, 26, 21, 43, 232, 171 },
+ { 56, 34, 51, 104, 114, 102, 29, 93, 77 },
+ { 39, 28, 85, 171, 58, 165, 90, 98, 64 },
+ { 34, 22, 116, 206, 23, 34, 43, 166, 73 },
+ { 107, 54, 32, 26, 51, 1, 81, 43, 31 },
+ { 68, 25, 106, 22, 64, 171, 36, 225, 114 },
+ { 34, 19, 21, 102, 132, 188, 16, 76, 124 },
+ { 62, 18, 78, 95, 85, 57, 50, 48, 51 } },
+ { { 193, 101, 35, 159, 215, 111, 89, 46, 111 },
+ { 60, 148, 31, 172, 219, 228, 21, 18, 111 },
+ { 112, 113, 77, 85, 179, 255, 38, 120, 114 },
+ { 40, 42, 1, 196, 245, 209, 10, 25, 109 },
+ { 88, 43, 29, 140, 166, 213, 37, 43, 154 },
+ { 61, 63, 30, 155, 67, 45, 68, 1, 209 },
+ { 100, 80, 8, 43, 154, 1, 51, 26, 71 },
+ { 142, 78, 78, 16, 255, 128, 34, 197, 171 },
+ { 41, 40, 5, 102, 211, 183, 4, 1, 221 },
+ { 51, 50, 17, 168, 209, 192, 23, 25, 82 } },
+ { { 138, 31, 36, 171, 27, 166, 38, 44, 229 },
+ { 67, 87, 58, 169, 82, 115, 26, 59, 179 },
+ { 63, 59, 90, 180, 59, 166, 93, 73, 154 },
+ { 40, 40, 21, 116, 143, 209, 34, 39, 175 },
+ { 47, 15, 16, 183, 34, 223, 49, 45, 183 },
+ { 46, 17, 33, 183, 6, 98, 15, 32, 183 },
+ { 57, 46, 22, 24, 128, 1, 54, 17, 37 },
+ { 65, 32, 73, 115, 28, 128, 23, 128, 205 },
+ { 40, 3, 9, 115, 51, 192, 18, 6, 223 },
+ { 87, 37, 9, 115, 59, 77, 64, 21, 47 } },
+ { { 104, 55, 44, 218, 9, 54, 53, 130, 226 },
+ { 64, 90, 70, 205, 40, 41, 23, 26, 57 },
+ { 54, 57, 112, 184, 5, 41, 38, 166, 213 },
+ { 30, 34, 26, 133, 152, 116, 10, 32, 134 },
+ { 39, 19, 53, 221, 26, 114, 32, 73, 255 },
+ { 31, 9, 65, 234, 2, 15, 1, 118, 73 },
+ { 75, 32, 12, 51, 192, 255, 160, 43, 51 },
+ { 88, 31, 35, 67, 102, 85, 55, 186, 85 },
+ { 56, 21, 23, 111, 59, 205, 45, 37, 192 },
+ { 55, 38, 70, 124, 73, 102, 1, 34, 98 } },
+ { { 125, 98, 42, 88, 104, 85, 117, 175, 82 },
+ { 95, 84, 53, 89, 128, 100, 113, 101, 45 },
+ { 75, 79, 123, 47, 51, 128, 81, 171, 1 },
+ { 57, 17, 5, 71, 102, 57, 53, 41, 49 },
+ { 38, 33, 13, 121, 57, 73, 26, 1, 85 },
+ { 41, 10, 67, 138, 77, 110, 90, 47, 114 },
+ { 115, 21, 2, 10, 102, 255, 166, 23, 6 },
+ { 101, 29, 16, 10, 85, 128, 101, 196, 26 },
+ { 57, 18, 10, 102, 102, 213, 34, 20, 43 },
+ { 117, 20, 15, 36, 163, 128, 68, 1, 26 } },
+ { { 102, 61, 71, 37, 34, 53, 31, 243, 192 },
+ { 69, 60, 71, 38, 73, 119, 28, 222, 37 },
+ { 68, 45, 128, 34, 1, 47, 11, 245, 171 },
+ { 62, 17, 19, 70, 146, 85, 55, 62, 70 },
+ { 37, 43, 37, 154, 100, 163, 85, 160, 1 },
+ { 63, 9, 92, 136, 28, 64, 32, 201, 85 },
+ { 75, 15, 9, 9, 64, 255, 184, 119, 16 },
+ { 86, 6, 28, 5, 64, 255, 25, 248, 1 },
+ { 56, 8, 17, 132, 137, 255, 55, 116, 128 },
+ { 58, 15, 20, 82, 135, 57, 26, 121, 40 } },
+ { { 164, 50, 31, 137, 154, 133, 25, 35, 218 },
+ { 51, 103, 44, 131, 131, 123, 31, 6, 158 },
+ { 86, 40, 64, 135, 148, 224, 45, 183, 128 },
+ { 22, 26, 17, 131, 240, 154, 14, 1, 209 },
+ { 45, 16, 21, 91, 64, 222, 7, 1, 197 },
+ { 56, 21, 39, 155, 60, 138, 23, 102, 213 },
+ { 83, 12, 13, 54, 192, 255, 68, 47, 28 },
+ { 85, 26, 85, 85, 128, 128, 32, 146, 171 },
+ { 18, 11, 7, 63, 144, 171, 4, 4, 246 },
+ { 35, 27, 10, 146, 174, 171, 12, 26, 128 } },
+ { { 190, 80, 35, 99, 180, 80, 126, 54, 45 },
+ { 85, 126, 47, 87, 176, 51, 41, 20, 32 },
+ { 101, 75, 128, 139, 118, 146, 116, 128, 85 },
+ { 56, 41, 15, 176, 236, 85, 37, 9, 62 },
+ { 71, 30, 17, 119, 118, 255, 17, 18, 138 },
+ { 101, 38, 60, 138, 55, 70, 43, 26, 142 },
+ { 146, 36, 19, 30, 171, 255, 97, 27, 20 },
+ { 138, 45, 61, 62, 219, 1, 81, 188, 64 },
+ { 32, 41, 20, 117, 151, 142, 20, 21, 163 },
+ { 112, 19, 12, 61, 195, 128, 48, 4, 24 } }
+};
+
+static int PutI4Mode(VP8BitWriter* const bw, int mode,
+ const uint8_t* const prob) {
+ if (VP8PutBit(bw, mode != B_DC_PRED, prob[0])) {
+ if (VP8PutBit(bw, mode != B_TM_PRED, prob[1])) {
+ if (VP8PutBit(bw, mode != B_VE_PRED, prob[2])) {
+ if (!VP8PutBit(bw, mode >= B_LD_PRED, prob[3])) {
+ if (VP8PutBit(bw, mode != B_HE_PRED, prob[4])) {
+ VP8PutBit(bw, mode != B_RD_PRED, prob[5]);
+ }
+ } else {
+ if (VP8PutBit(bw, mode != B_LD_PRED, prob[6])) {
+ if (VP8PutBit(bw, mode != B_VL_PRED, prob[7])) {
+ VP8PutBit(bw, mode != B_HD_PRED, prob[8]);
+ }
+ }
+ }
+ }
+ }
+ }
+ return mode;
+}
+
+static void PutI16Mode(VP8BitWriter* const bw, int mode) {
+ if (VP8PutBit(bw, (mode == TM_PRED || mode == H_PRED), 156)) {
+ VP8PutBit(bw, mode == TM_PRED, 128); // TM or HE
+ } else {
+ VP8PutBit(bw, mode == V_PRED, 163); // VE or DC
+ }
+}
+
+static void PutUVMode(VP8BitWriter* const bw, int uv_mode) {
+ if (VP8PutBit(bw, uv_mode != DC_PRED, 142)) {
+ if (VP8PutBit(bw, uv_mode != V_PRED, 114)) {
+ VP8PutBit(bw, uv_mode != H_PRED, 183); // else: TM_PRED
+ }
+ }
+}
+
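+// Writes the 2-bit segment id as a small tree: p[0] codes (s >= 2), then
+// p[1] or p[2] codes the low bit (hence the 'p += 1' below).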
+static void PutSegment(VP8BitWriter* const bw, int s, const uint8_t* p) {
+ if (VP8PutBit(bw, s >= 2, p[0])) p += 1;
+ VP8PutBit(bw, s & 1, p[1]);
+}
+
+void VP8CodeIntraModes(VP8Encoder* const enc) {
+ VP8BitWriter* const bw = &enc->bw_;
+ VP8EncIterator it;
+ VP8IteratorInit(enc, &it);
+ do {
+ const VP8MBInfo* mb = it.mb_;
+ const uint8_t* preds = it.preds_;
+ if (enc->segment_hdr_.update_map_) {
+ PutSegment(bw, mb->segment_, enc->proba_.segments_);
+ }
+ if (enc->proba_.use_skip_proba_) {
+ VP8PutBit(bw, mb->skip_, enc->proba_.skip_proba_);
+ }
+ if (VP8PutBit(bw, (mb->type_ != 0), 145)) { // i16x16
+ PutI16Mode(bw, preds[0]);
+ } else {
+ const int preds_w = enc->preds_w_;
+ const uint8_t* top_pred = preds - preds_w;
+ int x, y;
+ for (y = 0; y < 4; ++y) {
+ int left = preds[-1];
+ for (x = 0; x < 4; ++x) {
+ const uint8_t* const probas = kBModesProba[top_pred[x]][left];
+ left = PutI4Mode(bw, preds[x], probas);
+ }
+ top_pred = preds;
+ preds += preds_w;
+ }
+ }
+ PutUVMode(bw, mb->uv_mode_);
+ } while (VP8IteratorNext(&it, 0));
+}
+
+//-----------------------------------------------------------------------------
+// Paragraph 13
+
+const uint8_t
+ VP8CoeffsUpdateProba[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = {
+ { { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ }
+ },
+ { { { 217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255 },
+ { 234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255 }
+ },
+ { { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ }
+ },
+ { { { 186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255 }
+ },
+ { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ }
+ },
+ { { { 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ },
+ { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }
+ }
+ }
+};
+
+void VP8WriteProbas(VP8BitWriter* const bw, const VP8Proba* const probas) {
+ int t, b, c, p;
+ for (t = 0; t < NUM_TYPES; ++t) {
+ for (b = 0; b < NUM_BANDS; ++b) {
+ for (c = 0; c < NUM_CTX; ++c) {
+ for (p = 0; p < NUM_PROBAS; ++p) {
+ const uint8_t p0 = probas->coeffs_[t][b][c][p];
+ const int update = (p0 != VP8CoeffsProba0[t][b][c][p]);
+ if (VP8PutBit(bw, update, VP8CoeffsUpdateProba[t][b][c][p])) {
+ VP8PutValue(bw, p0, 8);
+ }
+ }
+ }
+ }
+ }
+ if (VP8PutBitUniform(bw, probas->use_skip_proba_)) {
+ VP8PutValue(bw, probas->skip_proba_, 8);
+ }
+}
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/src/enc/vp8enci.h b/src/enc/vp8enci.h
new file mode 100644
index 00000000..b19450d3
--- /dev/null
+++ b/src/enc/vp8enci.h
@@ -0,0 +1,471 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// WebP encoder: internal header.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_ENC_VP8ENCI_H_
+#define WEBP_ENC_VP8ENCI_H_
+
+#include <string.h>  // for memcpy()
+#include "webp/encode.h"
+#include "bit_writer.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+// Various defines and enums
+
+// version numbers
+#define ENC_MAJ_VERSION 0
+#define ENC_MIN_VERSION 1
+#define ENC_REV_VERSION 2
+
+// intra prediction modes
+enum { B_DC_PRED = 0, // 4x4 modes
+ B_TM_PRED = 1,
+ B_VE_PRED = 2,
+ B_HE_PRED = 3,
+ B_RD_PRED = 4,
+ B_VR_PRED = 5,
+ B_LD_PRED = 6,
+ B_VL_PRED = 7,
+ B_HD_PRED = 8,
+ B_HU_PRED = 9,
+ NUM_BMODES = B_HU_PRED + 1 - B_DC_PRED, // = 10
+
+ // Luma16 or UV modes
+ DC_PRED = B_DC_PRED, V_PRED = B_VE_PRED,
+ H_PRED = B_HE_PRED, TM_PRED = B_TM_PRED
+ };
+
+enum { NUM_MB_SEGMENTS = 4,
+ MAX_NUM_PARTITIONS = 8,
+ NUM_TYPES = 4, // 0: i16-AC, 1: i16-DC, 2: chroma-AC, 3: i4-AC
+ NUM_BANDS = 8,
+ NUM_CTX = 3,
+ NUM_PROBAS = 11,
+ MAX_LF_LEVELS = 64, // Maximum loop filter level
+ MAX_VARIABLE_LEVEL = 67 // last (inclusive) level with variable cost
+ };
+
+// YUV-cache parameters. Cache is 16-pixels wide.
+// The original or reconstructed samples can be accessed using VP8Scan[]
+// The predicted blocks can be accessed using offsets to yuv_p_ and
+// the arrays VP8*ModeOffsets[];
+// +----+ YUV Samples area. See VP8Scan[] for accessing the blocks.
+// Y_OFF |YYYY| <- original samples (enc->yuv_in_)
+// |YYYY|
+// |YYYY|
+// |YYYY|
+// U_OFF |UUVV| V_OFF (=U_OFF + 8)
+// |UUVV|
+// +----+
+// Y_OFF |YYYY| <- compressed/decoded samples ('yuv_out_')
+// |YYYY| There are two buffers like this ('yuv_out_'/'yuv_out2_')
+// |YYYY|
+// |YYYY|
+// U_OFF |UUVV| V_OFF
+// |UUVV|
+// x2 (for yuv_out2_)
+// +----+ Prediction area ('yuv_p_', size = PRED_SIZE)
+// I16DC16 |YYYY| Intra16 predictions (16x16 block each)
+// |YYYY|
+// |YYYY|
+// |YYYY|
+// I16TM16 |YYYY|
+// |YYYY|
+// |YYYY|
+// |YYYY|
+// I16VE16 |YYYY|
+// |YYYY|
+// |YYYY|
+// |YYYY|
+// I16HE16 |YYYY|
+// |YYYY|
+// |YYYY|
+// |YYYY|
+// +----+ Chroma U/V predictions (16x8 block each)
+// C8DC8 |UUVV|
+// |UUVV|
+// C8TM8 |UUVV|
+// |UUVV|
+// C8VE8 |UUVV|
+// |UUVV|
+// C8HE8 |UUVV|
+// |UUVV|
+// +----+ Intra 4x4 predictions (4x4 block each)
+// |YYYY| I4DC4 I4TM4 I4VE4 I4HE4
+// |YYYY| I4RD4 I4VR4 I4LD4 I4VL4
+// |YY..| I4HD4 I4HU4 I4TMP
+// +----+
+#define BPS 16 // this is the common stride
+#define Y_SIZE (BPS * 16)
+#define UV_SIZE (BPS * 8)
+#define YUV_SIZE (Y_SIZE + UV_SIZE)
+#define PRED_SIZE (6 * 16 * BPS + 12 * BPS)
+#define Y_OFF (0)
+#define U_OFF (Y_SIZE)
+#define V_OFF (U_OFF + 8)
+#define ALIGN_CST 15
+#define DO_ALIGN(PTR) ((uintptr_t)((PTR) + ALIGN_CST) & ~ALIGN_CST)
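+// DO_ALIGN() rounds PTR up to the next (ALIGN_CST + 1)-byte, i.e. 16-byte,
+// boundary.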
+
+extern const int VP8Scan[16 + 4 + 4]; // in quant.c
+extern const int VP8UVModeOffsets[4]; // in analysis.c
+extern const int VP8I16ModeOffsets[4];
+extern const int VP8I4ModeOffsets[NUM_BMODES];
+
+// Layout of prediction blocks
+// intra 16x16
+#define I16DC16 (0 * 16 * BPS)
+#define I16TM16 (1 * 16 * BPS)
+#define I16VE16 (2 * 16 * BPS)
+#define I16HE16 (3 * 16 * BPS)
+// chroma 8x8, two U/V blocks side by side (hence: 16x8 each)
+#define C8DC8 (4 * 16 * BPS)
+#define C8TM8 (4 * 16 * BPS + 8 * BPS)
+#define C8VE8 (5 * 16 * BPS)
+#define C8HE8 (5 * 16 * BPS + 8 * BPS)
+// intra 4x4
+#define I4DC4 (6 * 16 * BPS + 0)
+#define I4TM4 (6 * 16 * BPS + 4)
+#define I4VE4 (6 * 16 * BPS + 8)
+#define I4HE4 (6 * 16 * BPS + 12)
+#define I4RD4 (6 * 16 * BPS + 4 * BPS + 0)
+#define I4VR4 (6 * 16 * BPS + 4 * BPS + 4)
+#define I4LD4 (6 * 16 * BPS + 4 * BPS + 8)
+#define I4VL4 (6 * 16 * BPS + 4 * BPS + 12)
+#define I4HD4 (6 * 16 * BPS + 8 * BPS + 0)
+#define I4HU4 (6 * 16 * BPS + 8 * BPS + 4)
+#define I4TMP (6 * 16 * BPS + 8 * BPS + 8)
+
+typedef int64_t score_t; // type used for scores, rate, distortion
+#define MAX_COST ((score_t)0x7fffffffffffffLL)
+
+#define QFIX 17
+#define BIAS(b) ((b) << (QFIX - 8))
+// Fun fact: this is the _only_ line where we're actually being lossy and
+// discarding bits.
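+// iQ is a fixed-point reciprocal of the quantizer step (roughly
+// (1 << QFIX) / q) and B a rounding bias, so QUANTDIV() approximates n / q.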
+static inline int QUANTDIV(int n, int iQ, int B) {
+ return (n * iQ + B) >> QFIX;
+}
+extern const uint8_t VP8Zigzag[16];
+
+//-----------------------------------------------------------------------------
+// Headers
+
+typedef uint8_t ProbaArray[NUM_CTX][NUM_PROBAS];
+typedef uint64_t StatsArray[NUM_CTX][NUM_PROBAS][2];
+typedef uint16_t CostArray[NUM_CTX][MAX_VARIABLE_LEVEL + 1];
+typedef double LFStats[NUM_MB_SEGMENTS][MAX_LF_LEVELS]; // filter stats
+
+typedef struct VP8Encoder VP8Encoder;
+
+// segment features
+typedef struct {
+ int num_segments_; // Actual number of segments. 1 segment only = unused.
+ int update_map_; // whether to update the segment map or not.
+ // must be 0 if there's only 1 segment.
+ int size_; // bit-cost for transmitting the segment map
+} VP8SegmentHeader;
+
+// Struct collecting all frame-persistent probabilities.
+typedef struct {
+ uint8_t segments_[3]; // probabilities for segment tree
+ uint8_t skip_proba_; // final probability of being skipped.
+ ProbaArray coeffs_[NUM_TYPES][NUM_BANDS]; // 924 bytes
+ StatsArray stats_[NUM_TYPES][NUM_BANDS]; // 7.4k
+ CostArray level_cost_[NUM_TYPES][NUM_BANDS]; // 11.4k
+ int use_skip_proba_; // Note: we always use skip_proba for now.
+ int nb_skip_, nb_i4_, nb_i16_; // block type counters
+} VP8Proba;
+
+// Filter parameters. Not actually used in the code (we don't perform
+// the in-loop filtering), but filled from the user's config.
+typedef struct {
+ int simple_; // filtering type: 0=complex, 1=simple
+ int level_; // base filter level [0..63]
+ int sharpness_; // [0..7]
+ int i4x4_lf_delta_; // delta filter level for i4x4 relative to i16x16
+} VP8FilterHeader;
+
+//-----------------------------------------------------------------------------
+// Information about the macroblocks.
+
+typedef struct {
+ // block type
+ uint8_t type_:2; // 0=i4x4, 1=i16x16
+ uint8_t uv_mode_:2;
+ uint8_t skip_:1;
+ uint8_t segment_:2;
+ uint8_t alpha_; // quantization-susceptibility
+} VP8MBInfo;
+
+typedef struct {
+ uint16_t q_[16]; // quantizer steps
+ uint16_t iq_[16]; // reciprocals, fixed point.
+ uint16_t bias_[16]; // rounding bias
+ uint16_t zthresh_[16]; // value under which a coefficient is zeroed
+ uint16_t sharpen_[16]; // frequency boosters for slight sharpening
+} VP8Matrix;
+
+typedef struct {
+ VP8Matrix y1_, y2_, uv_; // quantization matrices
+ int alpha_; // quant-susceptibility, range [-127,127]. Zero is neutral.
+ // Lower values indicate a lower risk of blurriness.
+ int beta_; // filter-susceptibility, range [0,255].
+ int quant_; // final segment quantizer.
+ int fstrength_; // final in-loop filtering strength
+ // reactivities
+ int lambda_i16_, lambda_i4_, lambda_uv_;
+ int lambda_mode_, lambda_trellis_, tlambda_;
+ int lambda_trellis_i16_, lambda_trellis_i4_, lambda_trellis_uv_;
+} VP8SegmentInfo;
+
+// Handy transient struct to accumulate score and info during RD-optimization
+// and mode evaluation.
+typedef struct {
+ score_t D, SD, R, score; // Distortion, spectral distortion, rate, score.
+ int16_t y_dc_levels[16]; // Quantized levels for luma-DC, luma-AC, chroma.
+ int16_t y_ac_levels[16][16];
+ int16_t uv_levels[4 + 4][16];
+ int mode_i16; // mode number for intra16 prediction
+ int modes_i4[16]; // mode numbers for intra4 predictions
+ int mode_uv; // mode number of chroma prediction
+ uint32_t nz; // non-zero blocks
+} VP8ModeScore;
+
+// Iterator structure to iterate through macroblocks, pointing to the
+// right neighbouring data (samples, predictions, contexts, ...)
+typedef struct {
+ int x_, y_; // current macroblock
+ int y_offset_, uv_offset_; // offset to the luma / chroma planes
+ int y_stride_, uv_stride_; // respective strides
+ uint8_t* yuv_in_; // borrowed from enc_ (for now)
+ uint8_t* yuv_out_; // ''
+ uint8_t* yuv_out2_; // ''
+ uint8_t* yuv_p_; // ''
+ VP8Encoder* enc_; // back-pointer
+ VP8MBInfo* mb_; // current macroblock
+ VP8BitWriter* bw_; // current bit-writer
+ uint8_t* preds_; // intra mode predictors (4x4 blocks)
+ uint32_t* nz_; // non-zero pattern
+ uint8_t i4_boundary_[37]; // 32+5 boundary samples needed by intra4x4
+ uint8_t* i4_top_; // pointer to the current *top boundary sample
+ int i4_; // current intra4x4 mode being tested
+ int top_nz_[9]; // top-non-zero context.
+ int left_nz_[9]; // left-non-zero. left_nz[8] is independent.
+ uint64_t bit_count_[4][3]; // bit counters for coded levels.
+ uint64_t luma_bits_; // macroblock bit-cost for luma
+ uint64_t uv_bits_; // macroblock bit-cost for chroma
+ LFStats* lf_stats_; // filter stats (borrowed from enc_)
+ int do_trellis_; // if true, perform extra level optimisation
+ int done_; // true when scan is finished
+} VP8EncIterator;
+
+ // in iterator.c
+// must be called first.
+void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it);
+// restart a scan.
+void VP8IteratorReset(VP8EncIterator* const it);
+// import samples from source
+void VP8IteratorImport(const VP8EncIterator* const it);
+// export decimated samples
+void VP8IteratorExport(const VP8EncIterator* const it);
+// go to next macroblock. Returns !done_. If *block_to_save is non-null, will
+// save the boundary values to top_/left_ arrays. block_to_save can be
+// it->yuv_out_ or it->yuv_in_.
+int VP8IteratorNext(VP8EncIterator* const it,
+ const uint8_t* const block_to_save);
+// Intra4x4 iterations
+void VP8IteratorStartI4(VP8EncIterator* const it);
+// returns true if not done.
+int VP8IteratorRotateI4(VP8EncIterator* const it,
+ const uint8_t* const yuv_out);
+
+// Non-zero context setup/teardown
+void VP8IteratorNzToBytes(VP8EncIterator* const it);
+void VP8IteratorBytesToNz(VP8EncIterator* const it);
+
+// Helper functions to set mode properties
+void VP8SetIntra16Mode(const VP8EncIterator* const it, int mode);
+void VP8SetIntra4Mode(const VP8EncIterator* const it, int modes[16]);
+void VP8SetIntraUVMode(const VP8EncIterator* const it, int mode);
+void VP8SetSkip(const VP8EncIterator* const it, int skip);
+void VP8SetSegment(const VP8EncIterator* const it, int segment);
+void VP8IteratorResetCosts(VP8EncIterator* const it);
+
+//-----------------------------------------------------------------------------
+// VP8Encoder
+
+struct VP8Encoder {
+ const WebPConfig* config_; // user configuration and parameters
+ WebPPicture* pic_; // input / output picture
+
+ // headers
+ VP8FilterHeader filter_hdr_; // filtering information
+ VP8SegmentHeader segment_hdr_; // segment information
+
+ int profile_; // VP8's profile, deduced from Config.
+
+ // dimension, in macroblock units.
+ int mb_w_, mb_h_;
+ int preds_w_; // stride of the *preds_ prediction plane (=4*mb_w + 1)
+
+ // number of partitions (1, 2, 4 or 8 = MAX_NUM_PARTITIONS)
+ int num_parts_;
+
+ // per-partition boolean decoders.
+ VP8BitWriter bw_; // part0
+ VP8BitWriter parts_[MAX_NUM_PARTITIONS]; // token partitions
+
+ // quantization info (one set of DC/AC dequant factor per segment)
+ VP8SegmentInfo dqm_[NUM_MB_SEGMENTS];
+ int base_quant_; // nominal quantizer value. Only used
+ // for relative coding of segments' quant.
+ int uv_alpha_; // U/V quantization susceptibility
+ // global offset of quantizers, shared by all segments
+ int dq_y1_dc_;
+ int dq_y2_dc_, dq_y2_ac_;
+ int dq_uv_dc_, dq_uv_ac_;
+
+ // probabilities and statistics
+ VP8Proba proba_;
+ uint64_t sse_[3]; // sum of Y/U/V squared errors for all macroblocks
+ uint64_t sse_count_; // pixel count for the sse_[] stats
+ int coded_size_;
+ int residual_bytes_[3][4];
+ int block_count_[3];
+
+ // quality/speed settings
+ int method_; // 0=fastest, 6=best/slowest.
+ int rd_opt_level_; // Deduced from method_.
+
+ // Memory
+ VP8MBInfo* mb_info_; // contextual macroblock infos (mb_w_ + 1)
+ uint8_t* preds_; // predictions modes: (4*mb_w+1) * (4*mb_h+1)
+ uint32_t* nz_; // non-zero bit context: mb_w+1
+ uint8_t* yuv_in_; // input samples
+ uint8_t* yuv_out_; // output samples
+ uint8_t* yuv_out2_; // secondary scratch out-buffer. swapped with yuv_out_.
+ uint8_t* yuv_p_; // scratch buffer for prediction
+ uint8_t *y_top_; // top luma samples.
+ uint8_t *uv_top_; // top u/v samples.
+ // U and V are packed into 16 pixels (8 U + 8 V)
+ uint8_t *y_left_; // left luma samples (addressable from index -1 to 15).
+ uint8_t *u_left_; // left u samples (addressable from index -1 to 7)
+ uint8_t *v_left_; // left v samples (addressable from index -1 to 7)
+
+ LFStats *lf_stats_; // autofilter stats (if NULL, autofilter is off)
+};
+
+//-----------------------------------------------------------------------------
+// internal functions. Not public.
+
+ // in tree.c
+extern const uint8_t VP8CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS];
+extern const uint8_t
+ VP8CoeffsUpdateProba[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS];
+// Reset the token probabilities to their initial (default) values
+void VP8DefaultProbas(VP8Encoder* const enc);
+// Write the token probabilities
+void VP8WriteProbas(VP8BitWriter* const bw, const VP8Proba* const probas);
+// Writes the partition #0 modes (that is: all intra modes)
+void VP8CodeIntraModes(VP8Encoder* const enc);
+
+ // in syntax.c
+// Generates the final bitstream by coding the partition0 and headers,
+// and appending an assembly of all the pre-coded token partitions.
+// Return true if everything is ok.
+int VP8EncWrite(VP8Encoder* const enc);
+
+ // in frame.c
+extern const uint8_t VP8EncBands[16 + 1];
+// Form all the four Intra16x16 predictions in the yuv_p_ cache
+void VP8MakeLuma16Preds(const VP8EncIterator* const it);
+// Form all the four Chroma8x8 predictions in the yuv_p_ cache
+void VP8MakeChroma8Preds(const VP8EncIterator* const it);
+// Form all the ten Intra4x4 predictions in the yuv_p_ cache
+// for the 4x4 block it->i4_
+void VP8MakeIntra4Preds(const VP8EncIterator* const it);
+// Rate calculation
+int VP8GetCostLuma16(VP8EncIterator* const it, const VP8ModeScore* const rd);
+int VP8GetCostLuma4(VP8EncIterator* const it, const int16_t levels[16]);
+int VP8GetCostUV(VP8EncIterator* const it, const VP8ModeScore* const rd);
+// Main stat / coding passes
+int VP8EncLoop(VP8Encoder* const enc);
+int VP8StatLoop(VP8Encoder* const enc);
+
+ // in analysis.c
+// Main analysis loop. Decides the segmentations and complexity.
+// Assigns a first guess for Intra16 and uvmode_ prediction modes.
+int VP8EncAnalyze(VP8Encoder* const enc);
+
+ // in quant.c
+// Sets up segment's quantization values, base_quant_ and filter strengths.
+void VP8SetSegmentParams(VP8Encoder* const enc, float quality);
+// Pick best modes and fills the levels. Returns true if skipped.
+int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd, int rd_opt);
+
+ // in dsp.c
+// Transforms
+typedef void (*VP8Idct)(const uint8_t* ref, const int16_t* in, uint8_t* dst);
+typedef void (*VP8Fdct)(const uint8_t* src, const uint8_t* ref, int16_t* out);
+typedef void (*VP8WHT)(const int16_t* in, int16_t* out);
+extern VP8Idct VP8ITransform;
+extern VP8Fdct VP8FTransform;
+extern VP8WHT VP8ITransformWHT;
+extern VP8WHT VP8FTransformWHT;
+// Predictions
+// *dst is the destination block. *left and *top can be NULL.
+typedef void (*VP8IntraPreds)(uint8_t *dst, const uint8_t* left,
+ const uint8_t* top);
+typedef void (*VP8Intra4Preds)(uint8_t *dst, const uint8_t* top);
+extern VP8Intra4Preds VP8EncPredLuma4;
+extern VP8IntraPreds VP8EncPredLuma16;
+extern VP8IntraPreds VP8EncPredChroma8;
+
+typedef int (*VP8Metric)(const uint8_t* pix, const uint8_t* ref);
+extern VP8Metric VP8SSE16x16, VP8SSE16x8, VP8SSE8x8, VP8SSE4x4;
+typedef int (*VP8WMetric)(const uint8_t* pix, const uint8_t* ref,
+ const uint16_t* const weights);
+extern VP8WMetric VP8TDisto4x4, VP8TDisto16x16;
+
+typedef void (*VP8BlockCopy)(const uint8_t* src, uint8_t* dst);
+extern VP8BlockCopy VP8Copy4x4;
+extern VP8BlockCopy VP8Copy8x8;
+extern VP8BlockCopy VP8Copy16x16;
+// Quantization
+typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16],
+ int n, const VP8Matrix* const mtx);
+extern VP8QuantizeBlock VP8EncQuantizeBlock;
+
+typedef enum {
+ kSSE2,
+ kSSE3
+} CPUFeature;
+// returns true if the CPU supports the feature.
+typedef int (*VP8CPUInfo)(CPUFeature feature);
+extern VP8CPUInfo CPUInfo;
+
+void VP8EncDspInit(void); // must be called before using any of the above
+
+ // in filter.c
+extern void VP8InitFilter(VP8EncIterator* const it);
+extern void VP8StoreFilterStats(VP8EncIterator* const it);
+extern void VP8AdjustFilterStrength(VP8EncIterator* const it);
+
+//-----------------------------------------------------------------------------
+
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // WEBP_ENC_VP8ENCI_H_
diff --git a/src/enc/webpenc.c b/src/enc/webpenc.c
new file mode 100644
index 00000000..59221d7f
--- /dev/null
+++ b/src/enc/webpenc.c
@@ -0,0 +1,317 @@
+// Copyright 2011 Google Inc.
+//
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
+// -----------------------------------------------------------------------------
+//
+// WebP encoder: main entry point
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#include "vp8enci.h"
+
+// #define PRINT_MEMORY_INFO
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#ifdef PRINT_MEMORY_INFO
+#include <stdio.h>
+#endif
+
+#define MAX_DIMENSION 16384 // maximum width/height allowed by the spec
+
+//-----------------------------------------------------------------------------
+
+int WebPGetEncoderVersion(void) {
+ return (ENC_MAJ_VERSION << 16) | (ENC_MIN_VERSION << 8) | ENC_REV_VERSION;
+}
+
+//-----------------------------------------------------------------------------
+// WebPPicture
+//-----------------------------------------------------------------------------
+
+static int DummyWriter(const uint8_t* data, size_t data_size,
+ const WebPPicture* const picture) {
+ // The following (void) casts prevent 'unused parameter' warnings.
+ (void)data;
+ (void)data_size;
+ (void)picture;
+ return 1;
+}
+
+int WebPPictureInitInternal(WebPPicture* const picture, int version) {
+ if (version != WEBP_ENCODER_ABI_VERSION) {
+ return 0; // caller/system version mismatch!
+ }
+ if (picture) {
+ memset(picture, 0, sizeof(*picture));
+ picture->writer = DummyWriter;
+ }
+ return 1;
+}
+
+//-----------------------------------------------------------------------------
+// VP8Encoder
+//-----------------------------------------------------------------------------
+
+static void ResetSegmentHeader(VP8Encoder* const enc) {
+ VP8SegmentHeader* const hdr = &enc->segment_hdr_;
+ hdr->num_segments_ = enc->config_->segments;
+ hdr->update_map_ = (hdr->num_segments_ > 1);
+ hdr->size_ = 0;
+}
+
+static void ResetFilterHeader(VP8Encoder* const enc) {
+ VP8FilterHeader* const hdr = &enc->filter_hdr_;
+ hdr->simple_ = 1;
+ hdr->level_ = 0;
+ hdr->sharpness_ = 0;
+ hdr->i4x4_lf_delta_ = 0;
+}
+
+static void ResetBoundaryPredictions(VP8Encoder* const enc) {
+ // init boundary values once for all
+ // Note: actually, initializing the preds_[] is only needed for intra4.
+ int i;
+ uint8_t* const top = enc->preds_ - enc->preds_w_;
+ uint8_t* const left = enc->preds_ - 1;
+ for (i = -1; i < 4 * enc->mb_w_; ++i) {
+ top[i] = B_DC_PRED;
+ }
+ for (i = 0; i < 4 * enc->mb_h_; ++i) {
+ left[i * enc->preds_w_] = B_DC_PRED;
+ }
+ enc->nz_[-1] = 0; // constant
+}
+
+// Map the configured method (speed/quality setting) to the coding tools used.
+//-------------+---+---+---+---+---+---+
+// Method      | 0 | 1 | 2 | 3 | 4 | 5 +
+//-------------+---+---+---+---+---+---+
+// dynamic prob| ~ | x | x | x | x | x |
+//-------------+---+---+---+---+---+---+
+// rd-opt modes|   |   | x | x | x | x |
+//-------------+---+---+---+---+---+---+
+// fast i4/i16 | x | x |   |   |   |   |
+//-------------+---+---+---+---+---+---+
+// rd-opt i4/16|   |   | x | x | x | x |
+//-------------+---+---+---+---+---+---+
+// Trellis     |   | x |   |   | x | x |
+//-------------+---+---+---+---+---+---+
+// full-SNS    |   |   |   |   |   | x |
+//-------------+---+---+---+---+---+---+
+
+static void MapConfigToTools(VP8Encoder* const enc) {
+ const int method = enc->config_->method;
+ enc->method_ = method;
+ enc->rd_opt_level_ = (method >= 6) ? 3
+ : (method >= 5) ? 2
+ : (method >= 3) ? 1
+ : 0;
+}
+
+// Memory scaling with dimensions:
+// memory (bytes) ~= 2.25 * w + 0.0625 * w * h
+//
+// Typical memory footprint (768x510 picture)
+// Memory used:
+// encoder: 33919
+// block cache: 2880
+// info: 3072
+// preds: 24897
+// top samples: 1623
+// non-zero: 196
+// lf-stats: 2048
+// total: 68635
+// Transient object sizes:
+// VP8EncIterator: 352
+// VP8ModeScore: 912
+// VP8SegmentInfo: 532
+// VP8Proba: 31032
+// LFStats: 2048
+// Picture size (yuv): 589824
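+// As a cross-check for 768x510: mb_w = 48, mb_h = 32, so the prediction plane
+// is (4*48 + 1) * (4*32 + 1) = 24897 bytes and the block cache is
+// 3 * YUV_SIZE + PRED_SIZE = 3 * 384 + 1728 = 2880 bytes, as listed above.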
+
+static VP8Encoder* InitEncoder(const WebPConfig* const config,
+ WebPPicture* const picture) {
+ const int use_filter =
+ (config->filter_strength > 0) || (config->autofilter > 0);
+ const int mb_w = (picture->width + 15) >> 4;
+ const int mb_h = (picture->height + 15) >> 4;
+ const int preds_w = 4 * mb_w + 1;
+ const int preds_h = 4 * mb_h + 1;
+ const size_t preds_size = preds_w * preds_h * sizeof(uint8_t);
+ const int top_stride = mb_w * 16;
+ const size_t nz_size = (mb_w + 1) * sizeof(uint32_t);
+ const size_t cache_size = (3 * YUV_SIZE + PRED_SIZE) * sizeof(uint8_t);
+ const size_t info_size = mb_w * mb_h * sizeof(VP8MBInfo);
+ const size_t samples_size = (2 * top_stride + // top-luma/u/v
+ 16 + 16 + 16 + 8 + 1 + // left y/u/v
+ 2 * ALIGN_CST) // align all
+ * sizeof(uint8_t);
+ const size_t lf_stats_size = config->autofilter ? sizeof(LFStats) : 0;
+ VP8Encoder* enc;
+ uint8_t* mem;
+ size_t size = sizeof(VP8Encoder) + ALIGN_CST // main struct
+ + cache_size // working caches
+ + info_size // modes info
+ + preds_size // prediction modes
+ + samples_size // top/left samples
+ + nz_size // coeff context bits
+ + lf_stats_size; // autofilter stats
+
+#ifdef PRINT_MEMORY_INFO
+ printf("===================================\n");
+ printf("Memory used:\n"
+ " encoder: %ld\n"
+ " block cache: %ld\n"
+ " info: %ld\n"
+ " preds: %ld\n"
+ " top samples: %ld\n"
+ " non-zero: %ld\n"
+ " lf-stats: %ld\n"
+ " total: %ld\n",
+ sizeof(VP8Encoder) + ALIGN_CST, cache_size, info_size,
+ preds_size, samples_size, nz_size, lf_stats_size, size);
+ printf("Transient object sizes:\n"
+ " VP8EncIterator: %ld\n"
+ " VP8ModeScore: %ld\n"
+ " VP8SegmentInfo: %ld\n"
+ " VP8Proba: %ld\n"
+ " LFStats: %ld\n",
+ sizeof(VP8EncIterator), sizeof(VP8ModeScore),
+ sizeof(VP8SegmentInfo), sizeof(VP8Proba),
+ sizeof(LFStats));
+ printf("Picture size (yuv): %ld\n",
+ mb_w * mb_h * 384 * sizeof(uint8_t));
+ printf("===================================\n");
+#endif
+ mem = (uint8_t*)malloc(size);
+ if (mem == NULL) return NULL;
+ enc = (VP8Encoder*)mem;
+ mem = (uint8_t*)DO_ALIGN(mem + sizeof(*enc));
+ memset(enc, 0, sizeof(*enc));
+ enc->num_parts_ = 1 << config->partitions;
+ enc->mb_w_ = mb_w;
+ enc->mb_h_ = mb_h;
+ enc->preds_w_ = preds_w;
+ enc->yuv_in_ = (uint8_t*)mem;
+ mem += YUV_SIZE;
+ enc->yuv_out_ = (uint8_t*)mem;
+ mem += YUV_SIZE;
+ enc->yuv_out2_ = (uint8_t*)mem;
+ mem += YUV_SIZE;
+ enc->yuv_p_ = (uint8_t*)mem;
+ mem += PRED_SIZE;
+ enc->mb_info_ = (VP8MBInfo*)mem;
+ mem += info_size;
+ enc->preds_ = ((uint8_t*)mem) + 1 + enc->preds_w_;
+ mem += preds_w * preds_h * sizeof(uint8_t);
+ enc->nz_ = 1 + (uint32_t*)mem;
+ mem += nz_size;
+ enc->lf_stats_ = lf_stats_size ? (LFStats*)mem : NULL;
+ mem += lf_stats_size;
+
+ // top samples (all 16-aligned)
+ mem = (uint8_t*)DO_ALIGN(mem);
+ enc->y_top_ = (uint8_t*)mem;
+ enc->uv_top_ = enc->y_top_ + top_stride;
+ mem += 2 * top_stride;
+ mem = (uint8_t*)DO_ALIGN(mem + 1);
+ enc->y_left_ = (uint8_t*)mem;
+ mem += 16 + 16;
+ enc->u_left_ = (uint8_t*)mem;
+ mem += 16;
+ enc->v_left_ = (uint8_t*)mem;
+ mem += 8;
+
+ enc->config_ = config;
+ enc->profile_ = use_filter ? ((config->filter_type == 1) ? 0 : 1) : 2;
+ enc->pic_ = picture;
+
+ MapConfigToTools(enc);
+ VP8EncDspInit();
+ VP8DefaultProbas(enc);
+ ResetSegmentHeader(enc);
+ ResetFilterHeader(enc);
+ ResetBoundaryPredictions(enc);
+
+ return enc;
+}
+
+static void DeleteEncoder(VP8Encoder* enc) {
+ free(enc);
+}
+
+//-----------------------------------------------------------------------------
+
+static double GetPSNR(uint64_t err, uint64_t size) {
+ return err ? 10. * log10(255. * 255. * size / err) : 99.;
+}
+
+static void FinalizePSNR(const VP8Encoder* const enc) {
+ WebPAuxStats* stats = enc->pic_->stats;
+ const uint64_t size = enc->sse_count_;
+ const uint64_t* const sse = enc->sse_;
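+ // U and V are quarter-size in 4:2:0 (hence size / 4); the combined entry
+ // uses the total sample count, size * 3 / 2.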
+ stats->PSNR[0] = (float)GetPSNR(sse[0], size);
+ stats->PSNR[1] = (float)GetPSNR(sse[1], size / 4);
+ stats->PSNR[2] = (float)GetPSNR(sse[2], size / 4);
+ stats->PSNR[3] = (float)GetPSNR(sse[0] + sse[1] + sse[2], size * 3 / 2);
+}
+
+static void StoreStats(VP8Encoder* const enc) {
+ WebPAuxStats* const stats = enc->pic_->stats;
+ if (stats) {
+ int i, s;
+ for (i = 0; i < NUM_MB_SEGMENTS; ++i) {
+ stats->segment_level[i] = enc->dqm_[i].fstrength_;
+ stats->segment_quant[i] = enc->dqm_[i].quant_;
+ for (s = 0; s <= 2; ++s) {
+ stats->residual_bytes[s][i] = enc->residual_bytes_[s][i];
+ }
+ }
+ FinalizePSNR(enc);
+ stats->coded_size = enc->coded_size_;
+ for (i = 0; i < 3; ++i) {
+ stats->block_count[i] = enc->block_count_[i];
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+int WebPEncode(const WebPConfig* const config, WebPPicture* const pic) {
+ VP8Encoder* enc;
+ int ok;
+
+ if (config == NULL || pic == NULL)
+ return 0; // bad params
+ if (!WebPValidateConfig(config))
+ return 0; // invalid config.
+ if (pic->width <= 0 || pic->height <= 0)
+ return 0; // invalid parameters
+ if (pic->y == NULL || pic->u == NULL || pic->v == NULL)
+ return 0; // invalid parameters
+ if (pic->width >= MAX_DIMENSION || pic->height >= MAX_DIMENSION)
+ return 0; // image is too big
+
+ enc = InitEncoder(config, pic);
+ if (enc == NULL) return 0;
+ ok = VP8EncAnalyze(enc)
+ && VP8StatLoop(enc)
+ && VP8EncLoop(enc)
+ && VP8EncWrite(enc);
+ StoreStats(enc);
+ DeleteEncoder(enc);
+ return ok;
+}
+
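+// A minimal usage sketch of this entry point (illustrative only; it assumes
+// the WebPConfigInit()/WebPPictureInit() helpers and the field names declared
+// in webp/encode.h, and caller-owned YUV420 planes):
+//
+//   WebPConfig config;
+//   WebPPicture pic;
+//   if (!WebPConfigInit(&config) || !WebPPictureInit(&pic)) return 0;
+//   config.quality = 75;
+//   pic.width = width;
+//   pic.height = height;
+//   pic.y = y_plane;  pic.u = u_plane;  pic.v = v_plane;
+//   pic.y_stride = width;
+//   pic.uv_stride = (width + 1) / 2;
+//   pic.writer = MyWriter;       // int MyWriter(const uint8_t* data,
+//   pic.custom_ptr = my_state;   //   size_t size, const WebPPicture* pic);
+//   return WebPEncode(&config, &pic);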
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif