From a0fe94ae6a30f57b7f0c04efb95c995c2fd09a88 Mon Sep 17 00:00:00 2001
From: Ram Mohan
Date: Thu, 30 Nov 2023 01:51:30 +0530
Subject: restructure folders to avoid possible namespace collision

source files are restructured according to coding guidelines
---
 Android.bp                                |   22 +-
 CMakeLists.txt                            |    4 +-
 README.md                                 |    4 +-
 benchmark/benchmark_test.cpp              |    2 +-
 examples/ultrahdr_app.cpp                 |    6 +-
 fuzzer/ultrahdr_dec_fuzzer.cpp            |    2 +-
 fuzzer/ultrahdr_enc_fuzzer.cpp            |    6 +-
 lib/gainmapmath.cpp                       |  733 --------------
 lib/gainmapmath.h                         |  498 ----------
 lib/icc.cpp                               |  680 -------------
 lib/icc.h                                 |  261 -----
 lib/include/ultrahdr/gainmapmath.h        |  498 ++++++++++
 lib/include/ultrahdr/icc.h                |  261 +++++
 lib/include/ultrahdr/jpegdecoderhelper.h  |  154 +++
 lib/include/ultrahdr/jpegencoderhelper.h  |  106 ++
 lib/include/ultrahdr/jpegr.h              |  464 +++++++++
 lib/include/ultrahdr/jpegrutils.h         |  152 +++
 lib/include/ultrahdr/multipictureformat.h |   78 ++
 lib/include/ultrahdr/ultrahdr.h           |   82 ++
 lib/include/ultrahdr/ultrahdrcommon.h     |   64 ++
 lib/jpegdecoderhelper.cpp                 |  537 ----------
 lib/jpegdecoderhelper.h                   |  154 ---
 lib/jpegencoderhelper.cpp                 |  287 ------
 lib/jpegencoderhelper.h                   |  106 --
 lib/jpegr.cpp                             | 1509 -----------------------------
 lib/jpegr.h                               |  464 ---------
 lib/jpegrutils.cpp                        |  583 -----------
 lib/jpegrutils.h                          |  152 ---
 lib/multipictureformat.cpp                |   92 --
 lib/multipictureformat.h                  |   78 --
 lib/src/gainmapmath.cpp                   |  733 ++++++++++++++
 lib/src/icc.cpp                           |  680 +++++++++++++
 lib/src/jpegdecoderhelper.cpp             |  537 ++++++++++
 lib/src/jpegencoderhelper.cpp             |  287 ++++++
 lib/src/jpegr.cpp                         | 1509 +++++++++++++++++++++++++++++
 lib/src/jpegrutils.cpp                    |  583 +++++++++++
 lib/src/multipictureformat.cpp            |   92 ++
 lib/ultrahdr.h                            |   82 --
 lib/ultrahdrcommon.h                      |   64 --
 tests/gainmapmath_test.cpp                |    2 +-
 tests/icchelper_test.cpp                  |    2 +-
 tests/jpegdecoderhelper_test.cpp          |    6 +-
 tests/jpegencoderhelper_test.cpp          |    6 +-
 tests/jpegr_test.cpp                      |    6 +-
 44 files changed, 6314 insertions(+), 6314 deletions(-)
 delete mode 100644 lib/gainmapmath.cpp
 delete mode 100644 lib/gainmapmath.h
 delete mode 100644 lib/icc.cpp
 delete mode 100644 lib/icc.h
 create mode 100644 lib/include/ultrahdr/gainmapmath.h
 create mode 100644 lib/include/ultrahdr/icc.h
 create mode 100644 lib/include/ultrahdr/jpegdecoderhelper.h
 create mode 100644 lib/include/ultrahdr/jpegencoderhelper.h
 create mode 100644 lib/include/ultrahdr/jpegr.h
 create mode 100644 lib/include/ultrahdr/jpegrutils.h
 create mode 100644 lib/include/ultrahdr/multipictureformat.h
 create mode 100644 lib/include/ultrahdr/ultrahdr.h
 create mode 100644 lib/include/ultrahdr/ultrahdrcommon.h
 delete mode 100644 lib/jpegdecoderhelper.cpp
 delete mode 100644 lib/jpegdecoderhelper.h
 delete mode 100644 lib/jpegencoderhelper.cpp
 delete mode 100644 lib/jpegencoderhelper.h
 delete mode 100644 lib/jpegr.cpp
 delete mode 100644 lib/jpegr.h
 delete mode 100644 lib/jpegrutils.cpp
 delete mode 100644 lib/jpegrutils.h
 delete mode 100644 lib/multipictureformat.cpp
 delete mode 100644 lib/multipictureformat.h
 create mode 100644 lib/src/gainmapmath.cpp
 create mode 100644 lib/src/icc.cpp
 create mode 100644 lib/src/jpegdecoderhelper.cpp
 create mode 100644 lib/src/jpegencoderhelper.cpp
 create mode 100644 lib/src/jpegr.cpp
 create mode 100644 lib/src/jpegrutils.cpp
 create mode 100644 lib/src/multipictureformat.cpp
 delete mode 100644 lib/ultrahdr.h
 delete mode 100644 lib/ultrahdrcommon.h

diff --git a/Android.bp b/Android.bp
index d3cb33e..ce5f3a0 100644
--- a/Android.bp
+++ b/Android.bp
@@ -36,15 +36,15 @@ cc_library { enabled: false, host_supported: true, vendor_available:
true, - export_include_dirs: ["lib"], - local_include_dirs: ["lib"], + export_include_dirs: ["lib/include"], + local_include_dirs: ["lib/include"], srcs: [ - "lib/icc.cpp", - "lib/jpegr.cpp", - "lib/gainmapmath.cpp", - "lib/jpegrutils.cpp", - "lib/multipictureformat.cpp", + "lib/src/icc.cpp", + "lib/src/jpegr.cpp", + "lib/src/gainmapmath.cpp", + "lib/src/jpegrutils.cpp", + "lib/src/multipictureformat.cpp", ], shared_libs: [ @@ -67,10 +67,10 @@ cc_library { "liblog", ], - export_include_dirs: ["lib"], + export_include_dirs: ["lib/include"], srcs: [ - "lib/jpegencoderhelper.cpp", + "lib/src/jpegencoderhelper.cpp", ], } @@ -85,9 +85,9 @@ cc_library { "liblog", ], - export_include_dirs: ["lib"], + export_include_dirs: ["lib/include"], srcs: [ - "lib/jpegdecoderhelper.cpp", + "lib/src/jpegdecoderhelper.cpp", ], } diff --git a/CMakeLists.txt b/CMakeLists.txt index 84091f4..857972a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -295,12 +295,12 @@ set_property(DIRECTORY PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ########################################################### # File Lists ########################################################### -file(GLOB UHDR_LIB_LIST "${SOURCE_DIR}/*.cpp") +file(GLOB UHDR_LIB_LIST "${SOURCE_DIR}/src/*.cpp") file(GLOB UHDR_TEST_LIST "${TESTS_DIR}/*.cpp") file(GLOB UHDR_BM_LIST "${BENCHMARK_DIR}/*.cpp") file(GLOB IMAGE_IO_LIST "${THIRD_PARTY_DIR}/image_io/src/**/*.cc") -set(COMMON_INCLUDE_LIST ${SOURCE_DIR} ${JPEG_INCLUDE_DIRS}) +set(COMMON_INCLUDE_LIST ${SOURCE_DIR}/include/ ${JPEG_INCLUDE_DIRS}) set(COMMON_LIBS_LIST ${JPEG_LIBRARIES} Threads::Threads) ########################################################### diff --git a/README.md b/README.md index 27e5361..5c341b3 100644 --- a/README.md +++ b/README.md @@ -83,7 +83,7 @@ This will generate the following files under *{build_directory/Release}*: mkdir {build_directory} cd {build_directory} cmake -G "NMake Makefiles" -DUHDR_BUILD_TESTS=1 ../ - cmake --build ./ --config=Release + cmake --build ./ ctest This will generate the following files under *{build_directory}*: @@ -132,5 +132,5 @@ Using libultrahdr libultrahdr includes two classes of APIs, one to compress and the other to decompress HDR images: -- Refer to [jpegr.h](lib/jpegr.h) for detailed description of various encode and decode api. +- Refer to [jpegr.h](lib/include/ultrahdr/jpegr.h) for detailed description of various encode and decode api. - Refer to [ultrahdr_app.cpp](examples/ultrahdr_app.cpp) for examples of its usage. 
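The net effect for downstream builds: public headers now live under lib/include and are included with an ultrahdr/ prefix, so generically named headers such as jpegr.h or icc.h no longer collide with same-named headers from other projects on the include path. A minimal, hypothetical consumer translation unit illustrating the new layout (the file name and compiler invocation are assumptions for illustration; the APIs declared in the headers are unchanged by this patch):

```cpp
// consumer.cpp - hypothetical downstream file; build with the include root
// pointed at lib/include, e.g.:
//   g++ -I<path-to-libultrahdr>/lib/include -c consumer.cpp
//
// Headers are now referenced with the ultrahdr/ prefix:
#include "ultrahdr/jpegr.h"           // was: #include "jpegr.h"
#include "ultrahdr/ultrahdrcommon.h"  // was: #include "ultrahdrcommon.h"

// Everything remains in namespace ultrahdr, exactly as before the move.
using namespace ultrahdr;
```
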
diff --git a/benchmark/benchmark_test.cpp b/benchmark/benchmark_test.cpp index e9f9533..d013838 100644 --- a/benchmark/benchmark_test.cpp +++ b/benchmark/benchmark_test.cpp @@ -19,7 +19,7 @@ #include -#include "jpegr.h" +#include "ultrahdr/jpegr.h" using namespace ultrahdr; diff --git a/examples/ultrahdr_app.cpp b/examples/ultrahdr_app.cpp index f3fd372..43b4b50 100644 --- a/examples/ultrahdr_app.cpp +++ b/examples/ultrahdr_app.cpp @@ -27,9 +27,9 @@ #include #include -#include "ultrahdrcommon.h" -#include "gainmapmath.h" -#include "jpegr.h" +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/gainmapmath.h" +#include "ultrahdr/jpegr.h" using namespace ultrahdr; diff --git a/fuzzer/ultrahdr_dec_fuzzer.cpp b/fuzzer/ultrahdr_dec_fuzzer.cpp index 0e9c5d3..49dc913 100644 --- a/fuzzer/ultrahdr_dec_fuzzer.cpp +++ b/fuzzer/ultrahdr_dec_fuzzer.cpp @@ -18,7 +18,7 @@ #include #include -#include "jpegr.h" +#include "ultrahdr/jpegr.h" using namespace ultrahdr; diff --git a/fuzzer/ultrahdr_enc_fuzzer.cpp b/fuzzer/ultrahdr_enc_fuzzer.cpp index db6021e..23cc845 100644 --- a/fuzzer/ultrahdr_enc_fuzzer.cpp +++ b/fuzzer/ultrahdr_enc_fuzzer.cpp @@ -20,9 +20,9 @@ #include #include -#include "ultrahdrcommon.h" -#include "gainmapmath.h" -#include "jpegr.h" +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/gainmapmath.h" +#include "ultrahdr/jpegr.h" using namespace ultrahdr; diff --git a/lib/gainmapmath.cpp b/lib/gainmapmath.cpp deleted file mode 100644 index 23791c2..0000000 --- a/lib/gainmapmath.cpp +++ /dev/null @@ -1,733 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "gainmapmath.h" - -namespace ultrahdr { - -static const std::vector kPqOETF = [] { - std::vector result; - for (size_t idx = 0; idx < kPqOETFNumEntries; idx++) { - float value = static_cast(idx) / static_cast(kPqOETFNumEntries - 1); - result.push_back(pqOetf(value)); - } - return result; -}(); - -static const std::vector kPqInvOETF = [] { - std::vector result; - for (size_t idx = 0; idx < kPqInvOETFNumEntries; idx++) { - float value = static_cast(idx) / static_cast(kPqInvOETFNumEntries - 1); - result.push_back(pqInvOetf(value)); - } - return result; -}(); - -static const std::vector kHlgOETF = [] { - std::vector result; - for (size_t idx = 0; idx < kHlgOETFNumEntries; idx++) { - float value = static_cast(idx) / static_cast(kHlgOETFNumEntries - 1); - result.push_back(hlgOetf(value)); - } - return result; -}(); - -static const std::vector kHlgInvOETF = [] { - std::vector result; - for (size_t idx = 0; idx < kHlgInvOETFNumEntries; idx++) { - float value = static_cast(idx) / static_cast(kHlgInvOETFNumEntries - 1); - result.push_back(hlgInvOetf(value)); - } - return result; -}(); - -static const std::vector kSrgbInvOETF = [] { - std::vector result; - for (size_t idx = 0; idx < kSrgbInvOETFNumEntries; idx++) { - float value = static_cast(idx) / static_cast(kSrgbInvOETFNumEntries - 1); - result.push_back(srgbInvOetf(value)); - } - return result; -}(); - -// Use Shepard's method for inverse distance weighting. For more information: -// en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method - -float ShepardsIDW::euclideanDistance(float x1, float x2, float y1, float y2) { - return sqrt(((y2 - y1) * (y2 - y1)) + (x2 - x1) * (x2 - x1)); -} - -void ShepardsIDW::fillShepardsIDW(float* weights, int incR, int incB) { - for (int y = 0; y < mMapScaleFactor; y++) { - for (int x = 0; x < mMapScaleFactor; x++) { - float pos_x = ((float)x) / mMapScaleFactor; - float pos_y = ((float)y) / mMapScaleFactor; - int curr_x = floor(pos_x); - int curr_y = floor(pos_y); - int next_x = curr_x + incR; - int next_y = curr_y + incB; - float e1_distance = euclideanDistance(pos_x, curr_x, pos_y, curr_y); - int index = y * mMapScaleFactor * 4 + x * 4; - if (e1_distance == 0) { - weights[index++] = 1.f; - weights[index++] = 0.f; - weights[index++] = 0.f; - weights[index++] = 0.f; - } else { - float e1_weight = 1.f / e1_distance; - - float e2_distance = euclideanDistance(pos_x, curr_x, pos_y, next_y); - float e2_weight = 1.f / e2_distance; - - float e3_distance = euclideanDistance(pos_x, next_x, pos_y, curr_y); - float e3_weight = 1.f / e3_distance; - - float e4_distance = euclideanDistance(pos_x, next_x, pos_y, next_y); - float e4_weight = 1.f / e4_distance; - - float total_weight = e1_weight + e2_weight + e3_weight + e4_weight; - - weights[index++] = e1_weight / total_weight; - weights[index++] = e2_weight / total_weight; - weights[index++] = e3_weight / total_weight; - weights[index++] = e4_weight / total_weight; - } - } - } -} - -//////////////////////////////////////////////////////////////////////////////// -// sRGB transformations - -static const float kMaxPixelFloat = 1.0f; -static float clampPixelFloat(float value) { - return (value < 0.0f) ? 0.0f : (value > kMaxPixelFloat) ? kMaxPixelFloat : value; -} - -// See IEC 61966-2-1/Amd 1:2003, Equation F.7. -static const float kSrgbR = 0.2126f, kSrgbG = 0.7152f, kSrgbB = 0.0722f; - -float srgbLuminance(Color e) { return kSrgbR * e.r + kSrgbG * e.g + kSrgbB * e.b; } - -// See ITU-R BT.709-6, Section 3. 
-// Uses the same coefficients for deriving luma signal as -// IEC 61966-2-1/Amd 1:2003 states for luminance, so we reuse the luminance -// function above. -static const float kSrgbCb = 1.8556f, kSrgbCr = 1.5748f; - -Color srgbRgbToYuv(Color e_gamma) { - float y_gamma = srgbLuminance(e_gamma); - return {{{y_gamma, (e_gamma.b - y_gamma) / kSrgbCb, (e_gamma.r - y_gamma) / kSrgbCr}}}; -} - -// See ITU-R BT.709-6, Section 3. -// Same derivation to BT.2100's YUV->RGB, below. Similar to srgbRgbToYuv, we -// can reuse the luminance coefficients since they are the same. -static const float kSrgbGCb = kSrgbB * kSrgbCb / kSrgbG; -static const float kSrgbGCr = kSrgbR * kSrgbCr / kSrgbG; - -Color srgbYuvToRgb(Color e_gamma) { - return {{{clampPixelFloat(e_gamma.y + kSrgbCr * e_gamma.v), - clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v), - clampPixelFloat(e_gamma.y + kSrgbCb * e_gamma.u)}}}; -} - -// See IEC 61966-2-1/Amd 1:2003, Equations F.5 and F.6. -float srgbInvOetf(float e_gamma) { - if (e_gamma <= 0.04045f) { - return e_gamma / 12.92f; - } else { - return pow((e_gamma + 0.055f) / 1.055f, 2.4); - } -} - -Color srgbInvOetf(Color e_gamma) { - return {{{srgbInvOetf(e_gamma.r), srgbInvOetf(e_gamma.g), srgbInvOetf(e_gamma.b)}}}; -} - -// See IEC 61966-2-1, Equations F.5 and F.6. -float srgbInvOetfLUT(float e_gamma) { - uint32_t value = static_cast(e_gamma * (kSrgbInvOETFNumEntries - 1) + 0.5); - // TODO() : Remove once conversion modules have appropriate clamping in place - value = CLIP3(value, 0, kSrgbInvOETFNumEntries - 1); - return kSrgbInvOETF[value]; -} - -Color srgbInvOetfLUT(Color e_gamma) { - return {{{srgbInvOetfLUT(e_gamma.r), srgbInvOetfLUT(e_gamma.g), srgbInvOetfLUT(e_gamma.b)}}}; -} - -//////////////////////////////////////////////////////////////////////////////// -// Display-P3 transformations - -// See SMPTE EG 432-1, Equation 7-8. -static const float kP3R = 0.20949f, kP3G = 0.72160f, kP3B = 0.06891f; - -float p3Luminance(Color e) { return kP3R * e.r + kP3G * e.g + kP3B * e.b; } - -// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2. -// Unfortunately, calculation of luma signal differs from calculation of -// luminance for Display-P3, so we can't reuse p3Luminance here. -static const float kP3YR = 0.299f, kP3YG = 0.587f, kP3YB = 0.114f; -static const float kP3Cb = 1.772f, kP3Cr = 1.402f; - -Color p3RgbToYuv(Color e_gamma) { - float y_gamma = kP3YR * e_gamma.r + kP3YG * e_gamma.g + kP3YB * e_gamma.b; - return {{{y_gamma, (e_gamma.b - y_gamma) / kP3Cb, (e_gamma.r - y_gamma) / kP3Cr}}}; -} - -// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2. -// Same derivation to BT.2100's YUV->RGB, below. Similar to p3RgbToYuv, we must -// use luma signal coefficients rather than the luminance coefficients. -static const float kP3GCb = kP3YB * kP3Cb / kP3YG; -static const float kP3GCr = kP3YR * kP3Cr / kP3YG; - -Color p3YuvToRgb(Color e_gamma) { - return {{{clampPixelFloat(e_gamma.y + kP3Cr * e_gamma.v), - clampPixelFloat(e_gamma.y - kP3GCb * e_gamma.u - kP3GCr * e_gamma.v), - clampPixelFloat(e_gamma.y + kP3Cb * e_gamma.u)}}}; -} - -//////////////////////////////////////////////////////////////////////////////// -// BT.2100 transformations - according to ITU-R BT.2100-2 - -// See ITU-R BT.2100-2, Table 5, HLG Reference OOTF -static const float kBt2100R = 0.2627f, kBt2100G = 0.6780f, kBt2100B = 0.0593f; - -float bt2100Luminance(Color e) { return kBt2100R * e.r + kBt2100G * e.g + kBt2100B * e.b; } - -// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals. 
-// BT.2100 uses the same coefficients for calculating luma signal and luminance, -// so we reuse the luminance function here. -static const float kBt2100Cb = 1.8814f, kBt2100Cr = 1.4746f; - -Color bt2100RgbToYuv(Color e_gamma) { - float y_gamma = bt2100Luminance(e_gamma); - return {{{y_gamma, (e_gamma.b - y_gamma) / kBt2100Cb, (e_gamma.r - y_gamma) / kBt2100Cr}}}; -} - -// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals. -// -// Similar to bt2100RgbToYuv above, we can reuse the luminance coefficients. -// -// Derived by inversing bt2100RgbToYuv. The derivation for R and B are pretty -// straight forward; we just invert the formulas for U and V above. But deriving -// the formula for G is a bit more complicated: -// -// Start with equation for luminance: -// Y = kBt2100R * R + kBt2100G * G + kBt2100B * B -// Solve for G: -// G = (Y - kBt2100R * R - kBt2100B * B) / kBt2100B -// Substitute equations for R and B in terms YUV: -// G = (Y - kBt2100R * (Y + kBt2100Cr * V) - kBt2100B * (Y + kBt2100Cb * U)) / kBt2100B -// Simplify: -// G = Y * ((1 - kBt2100R - kBt2100B) / kBt2100G) -// + U * (kBt2100B * kBt2100Cb / kBt2100G) -// + V * (kBt2100R * kBt2100Cr / kBt2100G) -// -// We then get the following coeficients for calculating G from YUV: -// -// Coef for Y = (1 - kBt2100R - kBt2100B) / kBt2100G = 1 -// Coef for U = kBt2100B * kBt2100Cb / kBt2100G = kBt2100GCb = ~0.1645 -// Coef for V = kBt2100R * kBt2100Cr / kBt2100G = kBt2100GCr = ~0.5713 - -static const float kBt2100GCb = kBt2100B * kBt2100Cb / kBt2100G; -static const float kBt2100GCr = kBt2100R * kBt2100Cr / kBt2100G; - -Color bt2100YuvToRgb(Color e_gamma) { - return {{{clampPixelFloat(e_gamma.y + kBt2100Cr * e_gamma.v), - clampPixelFloat(e_gamma.y - kBt2100GCb * e_gamma.u - kBt2100GCr * e_gamma.v), - clampPixelFloat(e_gamma.y + kBt2100Cb * e_gamma.u)}}}; -} - -// See ITU-R BT.2100-2, Table 5, HLG Reference OETF. -static const float kHlgA = 0.17883277f, kHlgB = 0.28466892f, kHlgC = 0.55991073; - -float hlgOetf(float e) { - if (e <= 1.0f / 12.0f) { - return sqrt(3.0f * e); - } else { - return kHlgA * log(12.0f * e - kHlgB) + kHlgC; - } -} - -Color hlgOetf(Color e) { return {{{hlgOetf(e.r), hlgOetf(e.g), hlgOetf(e.b)}}}; } - -float hlgOetfLUT(float e) { - uint32_t value = static_cast(e * (kHlgOETFNumEntries - 1) + 0.5); - // TODO() : Remove once conversion modules have appropriate clamping in place - value = CLIP3(value, 0, kHlgOETFNumEntries - 1); - - return kHlgOETF[value]; -} - -Color hlgOetfLUT(Color e) { return {{{hlgOetfLUT(e.r), hlgOetfLUT(e.g), hlgOetfLUT(e.b)}}}; } - -// See ITU-R BT.2100-2, Table 5, HLG Reference EOTF. -float hlgInvOetf(float e_gamma) { - if (e_gamma <= 0.5f) { - return pow(e_gamma, 2.0f) / 3.0f; - } else { - return (exp((e_gamma - kHlgC) / kHlgA) + kHlgB) / 12.0f; - } -} - -Color hlgInvOetf(Color e_gamma) { - return {{{hlgInvOetf(e_gamma.r), hlgInvOetf(e_gamma.g), hlgInvOetf(e_gamma.b)}}}; -} - -float hlgInvOetfLUT(float e_gamma) { - uint32_t value = static_cast(e_gamma * (kHlgInvOETFNumEntries - 1) + 0.5); - // TODO() : Remove once conversion modules have appropriate clamping in place - value = CLIP3(value, 0, kHlgInvOETFNumEntries - 1); - - return kHlgInvOETF[value]; -} - -Color hlgInvOetfLUT(Color e_gamma) { - return {{{hlgInvOetfLUT(e_gamma.r), hlgInvOetfLUT(e_gamma.g), hlgInvOetfLUT(e_gamma.b)}}}; -} - -// See ITU-R BT.2100-2, Table 4, Reference PQ OETF. 
-static const float kPqM1 = 2610.0f / 16384.0f, kPqM2 = 2523.0f / 4096.0f * 128.0f; -static const float kPqC1 = 3424.0f / 4096.0f, kPqC2 = 2413.0f / 4096.0f * 32.0f, - kPqC3 = 2392.0f / 4096.0f * 32.0f; - -float pqOetf(float e) { - if (e <= 0.0f) return 0.0f; - return pow((kPqC1 + kPqC2 * pow(e, kPqM1)) / (1 + kPqC3 * pow(e, kPqM1)), kPqM2); -} - -Color pqOetf(Color e) { return {{{pqOetf(e.r), pqOetf(e.g), pqOetf(e.b)}}}; } - -float pqOetfLUT(float e) { - uint32_t value = static_cast(e * (kPqOETFNumEntries - 1) + 0.5); - // TODO() : Remove once conversion modules have appropriate clamping in place - value = CLIP3(value, 0, kPqOETFNumEntries - 1); - - return kPqOETF[value]; -} - -Color pqOetfLUT(Color e) { return {{{pqOetfLUT(e.r), pqOetfLUT(e.g), pqOetfLUT(e.b)}}}; } - -// Derived from the inverse of the Reference PQ OETF. -static const float kPqInvA = 128.0f, kPqInvB = 107.0f, kPqInvC = 2413.0f, kPqInvD = 2392.0f, - kPqInvE = 6.2773946361f, kPqInvF = 0.0126833f; - -float pqInvOetf(float e_gamma) { - // This equation blows up if e_gamma is 0.0, and checking on <= 0.0 doesn't - // always catch 0.0. So, check on 0.0001, since anything this small will - // effectively be crushed to zero anyways. - if (e_gamma <= 0.0001f) return 0.0f; - return pow( - (kPqInvA * pow(e_gamma, kPqInvF) - kPqInvB) / (kPqInvC - kPqInvD * pow(e_gamma, kPqInvF)), - kPqInvE); -} - -Color pqInvOetf(Color e_gamma) { - return {{{pqInvOetf(e_gamma.r), pqInvOetf(e_gamma.g), pqInvOetf(e_gamma.b)}}}; -} - -float pqInvOetfLUT(float e_gamma) { - uint32_t value = static_cast(e_gamma * (kPqInvOETFNumEntries - 1) + 0.5); - // TODO() : Remove once conversion modules have appropriate clamping in place - value = CLIP3(value, 0, kPqInvOETFNumEntries - 1); - - return kPqInvOETF[value]; -} - -Color pqInvOetfLUT(Color e_gamma) { - return {{{pqInvOetfLUT(e_gamma.r), pqInvOetfLUT(e_gamma.g), pqInvOetfLUT(e_gamma.b)}}}; -} - -//////////////////////////////////////////////////////////////////////////////// -// Color conversions - -Color bt709ToP3(Color e) { - return {{{0.82254f * e.r + 0.17755f * e.g + 0.00006f * e.b, - 0.03312f * e.r + 0.96684f * e.g + -0.00001f * e.b, - 0.01706f * e.r + 0.07240f * e.g + 0.91049f * e.b}}}; -} - -Color bt709ToBt2100(Color e) { - return {{{0.62740f * e.r + 0.32930f * e.g + 0.04332f * e.b, - 0.06904f * e.r + 0.91958f * e.g + 0.01138f * e.b, - 0.01636f * e.r + 0.08799f * e.g + 0.89555f * e.b}}}; -} - -Color p3ToBt709(Color e) { - return {{{1.22482f * e.r + -0.22490f * e.g + -0.00007f * e.b, - -0.04196f * e.r + 1.04199f * e.g + 0.00001f * e.b, - -0.01961f * e.r + -0.07865f * e.g + 1.09831f * e.b}}}; -} - -Color p3ToBt2100(Color e) { - return {{{0.75378f * e.r + 0.19862f * e.g + 0.04754f * e.b, - 0.04576f * e.r + 0.94177f * e.g + 0.01250f * e.b, - -0.00121f * e.r + 0.01757f * e.g + 0.98359f * e.b}}}; -} - -Color bt2100ToBt709(Color e) { - return {{{1.66045f * e.r + -0.58764f * e.g + -0.07286f * e.b, - -0.12445f * e.r + 1.13282f * e.g + -0.00837f * e.b, - -0.01811f * e.r + -0.10057f * e.g + 1.11878f * e.b}}}; -} - -Color bt2100ToP3(Color e) { - return {{{1.34369f * e.r + -0.28223f * e.g + -0.06135f * e.b, - -0.06533f * e.r + 1.07580f * e.g + -0.01051f * e.b, - 0.00283f * e.r + -0.01957f * e.g + 1.01679f * e.b}}}; -} - -// TODO: confirm we always want to convert like this before calculating -// luminance. 
-ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, - ultrahdr_color_gamut hdr_gamut) { - switch (sdr_gamut) { - case ULTRAHDR_COLORGAMUT_BT709: - switch (hdr_gamut) { - case ULTRAHDR_COLORGAMUT_BT709: - return identityConversion; - case ULTRAHDR_COLORGAMUT_P3: - return p3ToBt709; - case ULTRAHDR_COLORGAMUT_BT2100: - return bt2100ToBt709; - case ULTRAHDR_COLORGAMUT_UNSPECIFIED: - return nullptr; - } - break; - case ULTRAHDR_COLORGAMUT_P3: - switch (hdr_gamut) { - case ULTRAHDR_COLORGAMUT_BT709: - return bt709ToP3; - case ULTRAHDR_COLORGAMUT_P3: - return identityConversion; - case ULTRAHDR_COLORGAMUT_BT2100: - return bt2100ToP3; - case ULTRAHDR_COLORGAMUT_UNSPECIFIED: - return nullptr; - } - break; - case ULTRAHDR_COLORGAMUT_BT2100: - switch (hdr_gamut) { - case ULTRAHDR_COLORGAMUT_BT709: - return bt709ToBt2100; - case ULTRAHDR_COLORGAMUT_P3: - return p3ToBt2100; - case ULTRAHDR_COLORGAMUT_BT2100: - return identityConversion; - case ULTRAHDR_COLORGAMUT_UNSPECIFIED: - return nullptr; - } - break; - case ULTRAHDR_COLORGAMUT_UNSPECIFIED: - return nullptr; - } - return nullptr; -} - -// All of these conversions are derived from the respective input YUV->RGB conversion followed by -// the RGB->YUV for the receiving encoding. They are consistent with the RGB<->YUV functions in this -// file, given that we uses BT.709 encoding for sRGB and BT.601 encoding for Display-P3, to match -// DataSpace. - -Color yuv709To601(Color e_gamma) { - return {{{1.0f * e_gamma.y + 0.101579f * e_gamma.u + 0.196076f * e_gamma.v, - 0.0f * e_gamma.y + 0.989854f * e_gamma.u + -0.110653f * e_gamma.v, - 0.0f * e_gamma.y + -0.072453f * e_gamma.u + 0.983398f * e_gamma.v}}}; -} - -Color yuv709To2100(Color e_gamma) { - return {{{1.0f * e_gamma.y + -0.016969f * e_gamma.u + 0.096312f * e_gamma.v, - 0.0f * e_gamma.y + 0.995306f * e_gamma.u + -0.051192f * e_gamma.v, - 0.0f * e_gamma.y + 0.011507f * e_gamma.u + 1.002637f * e_gamma.v}}}; -} - -Color yuv601To709(Color e_gamma) { - return {{{1.0f * e_gamma.y + -0.118188f * e_gamma.u + -0.212685f * e_gamma.v, - 0.0f * e_gamma.y + 1.018640f * e_gamma.u + 0.114618f * e_gamma.v, - 0.0f * e_gamma.y + 0.075049f * e_gamma.u + 1.025327f * e_gamma.v}}}; -} - -Color yuv601To2100(Color e_gamma) { - return {{{1.0f * e_gamma.y + -0.128245f * e_gamma.u + -0.115879f * e_gamma.v, - 0.0f * e_gamma.y + 1.010016f * e_gamma.u + 0.061592f * e_gamma.v, - 0.0f * e_gamma.y + 0.086969f * e_gamma.u + 1.029350f * e_gamma.v}}}; -} - -Color yuv2100To709(Color e_gamma) { - return {{{1.0f * e_gamma.y + 0.018149f * e_gamma.u + -0.095132f * e_gamma.v, - 0.0f * e_gamma.y + 1.004123f * e_gamma.u + 0.051267f * e_gamma.v, - 0.0f * e_gamma.y + -0.011524f * e_gamma.u + 0.996782f * e_gamma.v}}}; -} - -Color yuv2100To601(Color e_gamma) { - return {{{1.0f * e_gamma.y + 0.117887f * e_gamma.u + 0.105521f * e_gamma.v, - 0.0f * e_gamma.y + 0.995211f * e_gamma.u + -0.059549f * e_gamma.v, - 0.0f * e_gamma.y + -0.084085f * e_gamma.u + 0.976518f * e_gamma.v}}}; -} - -void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma, - ColorTransformFn fn) { - Color yuv1 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2); - Color yuv2 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2); - Color yuv3 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2 + 1); - Color yuv4 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 + 1); - - yuv1 = fn(yuv1); - yuv2 = fn(yuv2); - yuv3 = fn(yuv3); - yuv4 = fn(yuv4); - - Color new_uv = (yuv1 + yuv2 + yuv3 + yuv4) / 4.0f; - - size_t pixel_y1_idx = x_chroma * 2 + 
y_chroma * 2 * image->luma_stride; - size_t pixel_y2_idx = (x_chroma * 2 + 1) + y_chroma * 2 * image->luma_stride; - size_t pixel_y3_idx = x_chroma * 2 + (y_chroma * 2 + 1) * image->luma_stride; - size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->luma_stride; - - uint8_t& y1_uint = reinterpret_cast(image->data)[pixel_y1_idx]; - uint8_t& y2_uint = reinterpret_cast(image->data)[pixel_y2_idx]; - uint8_t& y3_uint = reinterpret_cast(image->data)[pixel_y3_idx]; - uint8_t& y4_uint = reinterpret_cast(image->data)[pixel_y4_idx]; - - size_t pixel_count = image->chroma_stride * image->height / 2; - size_t pixel_uv_idx = x_chroma + y_chroma * (image->chroma_stride); - - uint8_t& u_uint = reinterpret_cast(image->chroma_data)[pixel_uv_idx]; - uint8_t& v_uint = reinterpret_cast(image->chroma_data)[pixel_count + pixel_uv_idx]; - - y1_uint = static_cast(CLIP3((yuv1.y * 255.0f + 0.5f), 0, 255)); - y2_uint = static_cast(CLIP3((yuv2.y * 255.0f + 0.5f), 0, 255)); - y3_uint = static_cast(CLIP3((yuv3.y * 255.0f + 0.5f), 0, 255)); - y4_uint = static_cast(CLIP3((yuv4.y * 255.0f + 0.5f), 0, 255)); - - u_uint = static_cast(CLIP3((new_uv.u * 255.0f + 128.0f + 0.5f), 0, 255)); - v_uint = static_cast(CLIP3((new_uv.v * 255.0f + 128.0f + 0.5f), 0, 255)); -} - -//////////////////////////////////////////////////////////////////////////////// -// Gain map calculations -uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata) { - return encodeGain(y_sdr, y_hdr, metadata, log2(metadata->minContentBoost), - log2(metadata->maxContentBoost)); -} - -uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata, - float log2MinContentBoost, float log2MaxContentBoost) { - float gain = 1.0f; - if (y_sdr > 0.0f) { - gain = y_hdr / y_sdr; - } - - if (gain < metadata->minContentBoost) gain = metadata->minContentBoost; - if (gain > metadata->maxContentBoost) gain = metadata->maxContentBoost; - - return static_cast((log2(gain) - log2MinContentBoost) / - (log2MaxContentBoost - log2MinContentBoost) * 255.0f); -} - -Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata) { - float logBoost = - log2(metadata->minContentBoost) * (1.0f - gain) + log2(metadata->maxContentBoost) * gain; - float gainFactor = exp2(logBoost); - return e * gainFactor; -} - -Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost) { - float logBoost = - log2(metadata->minContentBoost) * (1.0f - gain) + log2(metadata->maxContentBoost) * gain; - float gainFactor = exp2(logBoost * displayBoost / metadata->maxContentBoost); - return e * gainFactor; -} - -Color applyGainLUT(Color e, float gain, GainLUT& gainLUT) { - float gainFactor = gainLUT.getGainFactor(gain); - return e * gainFactor; -} - -Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y) { - uint8_t* luma_data = reinterpret_cast(image->data); - size_t luma_stride = image->luma_stride; - uint8_t* chroma_data = reinterpret_cast(image->chroma_data); - size_t chroma_stride = image->chroma_stride; - - size_t offset_cr = chroma_stride * (image->height / 2); - size_t pixel_y_idx = x + y * luma_stride; - size_t pixel_chroma_idx = x / 2 + (y / 2) * chroma_stride; - - uint8_t y_uint = luma_data[pixel_y_idx]; - uint8_t u_uint = chroma_data[pixel_chroma_idx]; - uint8_t v_uint = chroma_data[offset_cr + pixel_chroma_idx]; - - // 128 bias for UV given we are using jpeglib; see: - // https://github.com/kornelski/libjpeg/blob/master/structure.doc - return {{{static_cast(y_uint) / 255.0f, (static_cast(u_uint) - 128.0f) 
/ 255.0f, - (static_cast(v_uint) - 128.0f) / 255.0f}}}; -} - -Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y) { - uint16_t* luma_data = reinterpret_cast(image->data); - size_t luma_stride = image->luma_stride == 0 ? image->width : image->luma_stride; - uint16_t* chroma_data = reinterpret_cast(image->chroma_data); - size_t chroma_stride = image->chroma_stride; - - size_t pixel_y_idx = y * luma_stride + x; - size_t pixel_u_idx = (y >> 1) * chroma_stride + (x & ~0x1); - size_t pixel_v_idx = pixel_u_idx + 1; - - uint16_t y_uint = luma_data[pixel_y_idx] >> 6; - uint16_t u_uint = chroma_data[pixel_u_idx] >> 6; - uint16_t v_uint = chroma_data[pixel_v_idx] >> 6; - - // Conversions include taking narrow-range into account. - return {{{(static_cast(y_uint) - 64.0f) / 876.0f, - (static_cast(u_uint) - 64.0f) / 896.0f - 0.5f, - (static_cast(v_uint) - 64.0f) / 896.0f - 0.5f}}}; -} - -typedef Color (*getPixelFn)(jr_uncompressed_ptr, size_t, size_t); - -static Color samplePixels(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y, - getPixelFn get_pixel_fn) { - Color e = {{{0.0f, 0.0f, 0.0f}}}; - for (size_t dy = 0; dy < map_scale_factor; ++dy) { - for (size_t dx = 0; dx < map_scale_factor; ++dx) { - e += get_pixel_fn(image, x * map_scale_factor + dx, y * map_scale_factor + dy); - } - } - - return e / static_cast(map_scale_factor * map_scale_factor); -} - -Color sampleYuv420(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) { - return samplePixels(image, map_scale_factor, x, y, getYuv420Pixel); -} - -Color sampleP010(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) { - return samplePixels(image, map_scale_factor, x, y, getP010Pixel); -} - -// TODO: do we need something more clever for filtering either the map or images -// to generate the map? - -static size_t clamp(const size_t& val, const size_t& low, const size_t& high) { - return val < low ? low : (high < val ? high : val); -} - -static float mapUintToFloat(uint8_t map_uint) { return static_cast(map_uint) / 255.0f; } - -static float pythDistance(float x_diff, float y_diff) { - return sqrt(pow(x_diff, 2.0f) + pow(y_diff, 2.0f)); -} - -// TODO: If map_scale_factor is guaranteed to be an integer, then remove the following. -float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y) { - float x_map = static_cast(x) / map_scale_factor; - float y_map = static_cast(y) / map_scale_factor; - - size_t x_lower = static_cast(floor(x_map)); - size_t x_upper = x_lower + 1; - size_t y_lower = static_cast(floor(y_map)); - size_t y_upper = y_lower + 1; - - x_lower = clamp(x_lower, 0, map->width - 1); - x_upper = clamp(x_upper, 0, map->width - 1); - y_lower = clamp(y_lower, 0, map->height - 1); - y_upper = clamp(y_upper, 0, map->height - 1); - - // Use Shepard's method for inverse distance weighting. 
For more information: - // en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method - - float e1 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_lower * map->width]); - float e1_dist = - pythDistance(x_map - static_cast(x_lower), y_map - static_cast(y_lower)); - if (e1_dist == 0.0f) return e1; - - float e2 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_upper * map->width]); - float e2_dist = - pythDistance(x_map - static_cast(x_lower), y_map - static_cast(y_upper)); - if (e2_dist == 0.0f) return e2; - - float e3 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_lower * map->width]); - float e3_dist = - pythDistance(x_map - static_cast(x_upper), y_map - static_cast(y_lower)); - if (e3_dist == 0.0f) return e3; - - float e4 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_upper * map->width]); - float e4_dist = - pythDistance(x_map - static_cast(x_upper), y_map - static_cast(y_upper)); - if (e4_dist == 0.0f) return e2; - - float e1_weight = 1.0f / e1_dist; - float e2_weight = 1.0f / e2_dist; - float e3_weight = 1.0f / e3_dist; - float e4_weight = 1.0f / e4_dist; - float total_weight = e1_weight + e2_weight + e3_weight + e4_weight; - - return e1 * (e1_weight / total_weight) + e2 * (e2_weight / total_weight) + - e3 * (e3_weight / total_weight) + e4 * (e4_weight / total_weight); -} - -float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y, - ShepardsIDW& weightTables) { - // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the - // following by computing log2(map_scale_factor) once and then using >> log2(map_scale_factor) - size_t x_lower = x / map_scale_factor; - size_t x_upper = x_lower + 1; - size_t y_lower = y / map_scale_factor; - size_t y_upper = y_lower + 1; - - x_lower = std::min(x_lower, map->width - 1); - x_upper = std::min(x_upper, map->width - 1); - y_lower = std::min(y_lower, map->height - 1); - y_upper = std::min(y_upper, map->height - 1); - - float e1 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_lower * map->width]); - float e2 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_upper * map->width]); - float e3 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_lower * map->width]); - float e4 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_upper * map->width]); - - // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the - // following by using & (map_scale_factor - 1) - int offset_x = x % map_scale_factor; - int offset_y = y % map_scale_factor; - - float* weights = weightTables.mWeights; - if (x_lower == x_upper && y_lower == y_upper) - weights = weightTables.mWeightsC; - else if (x_lower == x_upper) - weights = weightTables.mWeightsNR; - else if (y_lower == y_upper) - weights = weightTables.mWeightsNB; - weights += offset_y * map_scale_factor * 4 + offset_x * 4; - - return e1 * weights[0] + e2 * weights[1] + e3 * weights[2] + e4 * weights[3]; -} - -uint32_t colorToRgba1010102(Color e_gamma) { - return (0x3ff & static_cast(e_gamma.r * 1023.0f)) | - ((0x3ff & static_cast(e_gamma.g * 1023.0f)) << 10) | - ((0x3ff & static_cast(e_gamma.b * 1023.0f)) << 20) | - (0x3 << 30); // Set alpha to 1.0 -} - -uint64_t colorToRgbaF16(Color e_gamma) { - return (uint64_t)floatToHalf(e_gamma.r) | (((uint64_t)floatToHalf(e_gamma.g)) << 16) | - (((uint64_t)floatToHalf(e_gamma.b)) << 32) | (((uint64_t)floatToHalf(1.0f)) << 48); -} - -} // namespace ultrahdr diff --git a/lib/gainmapmath.h b/lib/gainmapmath.h deleted file mode 100644 
index bdbaf02..0000000 --- a/lib/gainmapmath.h +++ /dev/null @@ -1,498 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ULTRAHDR_GAINMAPMATH_H -#define ULTRAHDR_GAINMAPMATH_H - -#include - -#include "ultrahdr.h" -#include "jpegr.h" - -#define CLIP3(x, min, max) ((x) < (min)) ? (min) : ((x) > (max)) ? (max) : (x) - -namespace ultrahdr { - -//////////////////////////////////////////////////////////////////////////////// -// Framework - -const float kSdrWhiteNits = 100.0f; -const float kHlgMaxNits = 1000.0f; -const float kPqMaxNits = 10000.0f; - -struct Color { - union { - struct { - float r; - float g; - float b; - }; - struct { - float y; - float u; - float v; - }; - }; -}; - -typedef Color (*ColorTransformFn)(Color); -typedef float (*ColorCalculationFn)(Color); - -// A transfer function mapping encoded values to linear values, -// represented by this 7-parameter piecewise function: -// -// linear = sign(encoded) * (c*|encoded| + f) , 0 <= |encoded| < d -// = sign(encoded) * ((a*|encoded| + b)^g + e), d <= |encoded| -// -// (A simple gamma transfer function sets g to gamma and a to 1.) -typedef struct TransferFunction { - float g, a, b, c, d, e, f; -} TransferFunction; - -static constexpr TransferFunction kSRGB_TransFun = { - 2.4f, (float)(1 / 1.055), (float)(0.055 / 1.055), (float)(1 / 12.92), 0.04045f, 0.0f, 0.0f}; - -static constexpr TransferFunction kLinear_TransFun = {1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}; - -inline Color operator+=(Color& lhs, const Color& rhs) { - lhs.r += rhs.r; - lhs.g += rhs.g; - lhs.b += rhs.b; - return lhs; -} -inline Color operator-=(Color& lhs, const Color& rhs) { - lhs.r -= rhs.r; - lhs.g -= rhs.g; - lhs.b -= rhs.b; - return lhs; -} - -inline Color operator+(const Color& lhs, const Color& rhs) { - Color temp = lhs; - return temp += rhs; -} -inline Color operator-(const Color& lhs, const Color& rhs) { - Color temp = lhs; - return temp -= rhs; -} - -inline Color operator+=(Color& lhs, const float rhs) { - lhs.r += rhs; - lhs.g += rhs; - lhs.b += rhs; - return lhs; -} -inline Color operator-=(Color& lhs, const float rhs) { - lhs.r -= rhs; - lhs.g -= rhs; - lhs.b -= rhs; - return lhs; -} -inline Color operator*=(Color& lhs, const float rhs) { - lhs.r *= rhs; - lhs.g *= rhs; - lhs.b *= rhs; - return lhs; -} -inline Color operator/=(Color& lhs, const float rhs) { - lhs.r /= rhs; - lhs.g /= rhs; - lhs.b /= rhs; - return lhs; -} - -inline Color operator+(const Color& lhs, const float rhs) { - Color temp = lhs; - return temp += rhs; -} -inline Color operator-(const Color& lhs, const float rhs) { - Color temp = lhs; - return temp -= rhs; -} -inline Color operator*(const Color& lhs, const float rhs) { - Color temp = lhs; - return temp *= rhs; -} -inline Color operator/(const Color& lhs, const float rhs) { - Color temp = lhs; - return temp /= rhs; -} - -inline uint16_t floatToHalf(float f) { - // round-to-nearest-even: add last bit after truncated mantissa - const uint32_t b = 
*((uint32_t*)&f) + 0x00001000; - - const int32_t e = (b & 0x7F800000) >> 23; // exponent - const uint32_t m = b & 0x007FFFFF; // mantissa - - // sign : normalized : denormalized : saturate - return (b & 0x80000000) >> 16 | (e > 112) * ((((e - 112) << 10) & 0x7C00) | m >> 13) | - ((e < 113) & (e > 101)) * ((((0x007FF000 + m) >> (125 - e)) + 1) >> 1) | - (e > 143) * 0x7FFF; -} - -constexpr size_t kGainFactorPrecision = 10; -constexpr size_t kGainFactorNumEntries = 1 << kGainFactorPrecision; -struct GainLUT { - GainLUT(ultrahdr_metadata_ptr metadata) { - for (size_t idx = 0; idx < kGainFactorNumEntries; idx++) { - float value = static_cast(idx) / static_cast(kGainFactorNumEntries - 1); - float logBoost = log2(metadata->minContentBoost) * (1.0f - value) + - log2(metadata->maxContentBoost) * value; - mGainTable[idx] = exp2(logBoost); - } - } - - GainLUT(ultrahdr_metadata_ptr metadata, float displayBoost) { - float boostFactor = displayBoost > 0 ? displayBoost / metadata->maxContentBoost : 1.0f; - for (size_t idx = 0; idx < kGainFactorNumEntries; idx++) { - float value = static_cast(idx) / static_cast(kGainFactorNumEntries - 1); - float logBoost = log2(metadata->minContentBoost) * (1.0f - value) + - log2(metadata->maxContentBoost) * value; - mGainTable[idx] = exp2(logBoost * boostFactor); - } - } - - ~GainLUT() {} - - float getGainFactor(float gain) { - uint32_t idx = static_cast(gain * (kGainFactorNumEntries - 1) + 0.5); - // TODO() : Remove once conversion modules have appropriate clamping in place - idx = CLIP3(idx, 0, kGainFactorNumEntries - 1); - return mGainTable[idx]; - } - - private: - float mGainTable[kGainFactorNumEntries]; -}; - -struct ShepardsIDW { - ShepardsIDW(int mapScaleFactor) : mMapScaleFactor{mapScaleFactor} { - const int size = mMapScaleFactor * mMapScaleFactor * 4; - mWeights = new float[size]; - mWeightsNR = new float[size]; - mWeightsNB = new float[size]; - mWeightsC = new float[size]; - fillShepardsIDW(mWeights, 1, 1); - fillShepardsIDW(mWeightsNR, 0, 1); - fillShepardsIDW(mWeightsNB, 1, 0); - fillShepardsIDW(mWeightsC, 0, 0); - } - ~ShepardsIDW() { - delete[] mWeights; - delete[] mWeightsNR; - delete[] mWeightsNB; - delete[] mWeightsC; - } - - int mMapScaleFactor; - // Image :- - // p00 p01 p02 p03 p04 p05 p06 p07 - // p10 p11 p12 p13 p14 p15 p16 p17 - // p20 p21 p22 p23 p24 p25 p26 p27 - // p30 p31 p32 p33 p34 p35 p36 p37 - // p40 p41 p42 p43 p44 p45 p46 p47 - // p50 p51 p52 p53 p54 p55 p56 p57 - // p60 p61 p62 p63 p64 p65 p66 p67 - // p70 p71 p72 p73 p74 p75 p76 p77 - - // Gain Map (for 4 scale factor) :- - // m00 p01 - // m10 m11 - - // Gain sample of curr 4x4, right 4x4, bottom 4x4, bottom right 4x4 are used during - // reconstruction. hence table weight size is 4. - float* mWeights; - // TODO: check if its ok to mWeights at places - float* mWeightsNR; // no right - float* mWeightsNB; // no bottom - float* mWeightsC; // no right & bottom - - float euclideanDistance(float x1, float x2, float y1, float y2); - void fillShepardsIDW(float* weights, int incR, int incB); -}; - -//////////////////////////////////////////////////////////////////////////////// -// sRGB transformations -// NOTE: sRGB has the same color primaries as BT.709, but different transfer -// function. For this reason, all sRGB transformations here apply to BT.709, -// except for those concerning transfer functions. - -/* - * Calculate the luminance of a linear RGB sRGB pixel, according to - * IEC 61966-2-1/Amd 1:2003. - * - * [0.0, 1.0] range in and out. 
- */ -float srgbLuminance(Color e); - -/* - * Convert from OETF'd srgb RGB to YUV, according to ITU-R BT.709-6. - * - * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace. - */ -Color srgbRgbToYuv(Color e_gamma); - -/* - * Convert from OETF'd srgb YUV to RGB, according to ITU-R BT.709-6. - * - * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace. - */ -Color srgbYuvToRgb(Color e_gamma); - -/* - * Convert from srgb to linear, according to IEC 61966-2-1/Amd 1:2003. - * - * [0.0, 1.0] range in and out. - */ -float srgbInvOetf(float e_gamma); -Color srgbInvOetf(Color e_gamma); -float srgbInvOetfLUT(float e_gamma); -Color srgbInvOetfLUT(Color e_gamma); - -constexpr size_t kSrgbInvOETFPrecision = 10; -constexpr size_t kSrgbInvOETFNumEntries = 1 << kSrgbInvOETFPrecision; - -//////////////////////////////////////////////////////////////////////////////// -// Display-P3 transformations - -/* - * Calculated the luminance of a linear RGB P3 pixel, according to SMPTE EG 432-1. - * - * [0.0, 1.0] range in and out. - */ -float p3Luminance(Color e); - -/* - * Convert from OETF'd P3 RGB to YUV, according to ITU-R BT.601-7. - * - * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace. - */ -Color p3RgbToYuv(Color e_gamma); - -/* - * Convert from OETF'd P3 YUV to RGB, according to ITU-R BT.601-7. - * - * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace. - */ -Color p3YuvToRgb(Color e_gamma); - -//////////////////////////////////////////////////////////////////////////////// -// BT.2100 transformations - according to ITU-R BT.2100-2 - -/* - * Calculate the luminance of a linear RGB BT.2100 pixel. - * - * [0.0, 1.0] range in and out. - */ -float bt2100Luminance(Color e); - -/* - * Convert from OETF'd BT.2100 RGB to YUV, according to ITU-R BT.2100-2. - * - * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace. - */ -Color bt2100RgbToYuv(Color e_gamma); - -/* - * Convert from OETF'd BT.2100 YUV to RGB, according to ITU-R BT.2100-2. - * - * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace. - */ -Color bt2100YuvToRgb(Color e_gamma); - -/* - * Convert from scene luminance to HLG. - * - * [0.0, 1.0] range in and out. - */ -float hlgOetf(float e); -Color hlgOetf(Color e); -float hlgOetfLUT(float e); -Color hlgOetfLUT(Color e); - -constexpr size_t kHlgOETFPrecision = 16; -constexpr size_t kHlgOETFNumEntries = 1 << kHlgOETFPrecision; - -/* - * Convert from HLG to scene luminance. - * - * [0.0, 1.0] range in and out. - */ -float hlgInvOetf(float e_gamma); -Color hlgInvOetf(Color e_gamma); -float hlgInvOetfLUT(float e_gamma); -Color hlgInvOetfLUT(Color e_gamma); - -constexpr size_t kHlgInvOETFPrecision = 12; -constexpr size_t kHlgInvOETFNumEntries = 1 << kHlgInvOETFPrecision; - -/* - * Convert from scene luminance to PQ. - * - * [0.0, 1.0] range in and out. - */ -float pqOetf(float e); -Color pqOetf(Color e); -float pqOetfLUT(float e); -Color pqOetfLUT(Color e); - -constexpr size_t kPqOETFPrecision = 16; -constexpr size_t kPqOETFNumEntries = 1 << kPqOETFPrecision; - -/* - * Convert from PQ to scene luminance in nits. - * - * [0.0, 1.0] range in and out. 
- */ -float pqInvOetf(float e_gamma); -Color pqInvOetf(Color e_gamma); -float pqInvOetfLUT(float e_gamma); -Color pqInvOetfLUT(Color e_gamma); - -constexpr size_t kPqInvOETFPrecision = 12; -constexpr size_t kPqInvOETFNumEntries = 1 << kPqInvOETFPrecision; - -//////////////////////////////////////////////////////////////////////////////// -// Color space conversions - -/* - * Convert between color spaces with linear RGB data, according to ITU-R BT.2407 and EG 432-1. - * - * All conversions are derived from multiplying the matrix for XYZ to output RGB color gamut by the - * matrix for input RGB color gamut to XYZ. The matrix for converting from XYZ to an RGB gamut is - * always the inverse of the RGB gamut to XYZ matrix. - */ -Color bt709ToP3(Color e); -Color bt709ToBt2100(Color e); -Color p3ToBt709(Color e); -Color p3ToBt2100(Color e); -Color bt2100ToBt709(Color e); -Color bt2100ToP3(Color e); - -/* - * Identity conversion. - */ -inline Color identityConversion(Color e) { return e; } - -/* - * Get the conversion to apply to the HDR image for gain map generation - */ -ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, ultrahdr_color_gamut hdr_gamut); - -/* - * Convert between YUV encodings, according to ITU-R BT.709-6, ITU-R BT.601-7, and ITU-R BT.2100-2. - * - * Bt.709 and Bt.2100 have well-defined YUV encodings; Display-P3's is less well defined, but is - * treated as Bt.601 by DataSpace, hence we do the same. - */ -Color yuv709To601(Color e_gamma); -Color yuv709To2100(Color e_gamma); -Color yuv601To709(Color e_gamma); -Color yuv601To2100(Color e_gamma); -Color yuv2100To709(Color e_gamma); -Color yuv2100To601(Color e_gamma); - -/* - * Performs a transformation at the chroma x and y coordinates provided on a YUV420 image. - * - * Apply the transformation by determining transformed YUV for each of the 4 Y + 1 UV; each Y gets - * this result, and UV gets the averaged result. - * - * x_chroma and y_chroma should be less than or equal to half the image's width and height - * respecitively, since input is 4:2:0 subsampled. - */ -void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma, - ColorTransformFn fn); - -//////////////////////////////////////////////////////////////////////////////// -// Gain map calculations - -/* - * Calculate the 8-bit unsigned integer gain value for the given SDR and HDR - * luminances in linear space, and the hdr ratio to encode against. - * - * Note: since this library always uses gamma of 1.0, offsetSdr of 0.0, and - * offsetHdr of 0.0, this function doesn't handle different metadata values for - * these fields. - */ -uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata); -uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata, - float log2MinContentBoost, float log2MaxContentBoost); - -/* - * Calculates the linear luminance in nits after applying the given gain - * value, with the given hdr ratio, to the given sdr input in the range [0, 1]. - * - * Note: similar to encodeGain(), this function only supports gamma 1.0, - * offsetSdr 0.0, offsetHdr 0.0, hdrCapacityMin 1.0, and hdrCapacityMax equal to - * gainMapMax, as this library encodes. - */ -Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata); -Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost); -Color applyGainLUT(Color e, float gain, GainLUT& gainLUT); - -/* - * Helper for sampling from YUV 420 images. 
- */ -Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y); - -/* - * Helper for sampling from P010 images. - * - * Expect narrow-range image data for P010. - */ -Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y); - -/* - * Sample the image at the provided location, with a weighting based on nearby - * pixels and the map scale factor. - */ -Color sampleYuv420(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y); - -/* - * Sample the image at the provided location, with a weighting based on nearby - * pixels and the map scale factor. - * - * Expect narrow-range image data for P010. - */ -Color sampleP010(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y); - -/* - * Sample the gain value for the map from a given x,y coordinate on a scale - * that is map scale factor larger than the map size. - */ -float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y); -float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y, - ShepardsIDW& weightTables); - -/* - * Convert from Color to RGBA1010102. - * - * Alpha always set to 1.0. - */ -uint32_t colorToRgba1010102(Color e_gamma); - -/* - * Convert from Color to F16. - * - * Alpha always set to 1.0. - */ -uint64_t colorToRgbaF16(Color e_gamma); - -} // namespace ultrahdr - -#endif // ULTRAHDR_GAINMAPMATH_H diff --git a/lib/icc.cpp b/lib/icc.cpp deleted file mode 100644 index 851dd9d..0000000 --- a/lib/icc.cpp +++ /dev/null @@ -1,680 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include - -#include "ultrahdrcommon.h" -#include "icc.h" - -namespace ultrahdr { - -static void Matrix3x3_apply(const Matrix3x3* m, float* x) { - float y0 = x[0] * m->vals[0][0] + x[1] * m->vals[0][1] + x[2] * m->vals[0][2]; - float y1 = x[0] * m->vals[1][0] + x[1] * m->vals[1][1] + x[2] * m->vals[1][2]; - float y2 = x[0] * m->vals[2][0] + x[1] * m->vals[2][1] + x[2] * m->vals[2][2]; - x[0] = y0; - x[1] = y1; - x[2] = y2; -} - -bool Matrix3x3_invert(const Matrix3x3* src, Matrix3x3* dst) { - double a00 = src->vals[0][0]; - double a01 = src->vals[1][0]; - double a02 = src->vals[2][0]; - double a10 = src->vals[0][1]; - double a11 = src->vals[1][1]; - double a12 = src->vals[2][1]; - double a20 = src->vals[0][2]; - double a21 = src->vals[1][2]; - double a22 = src->vals[2][2]; - - double b0 = a00 * a11 - a01 * a10; - double b1 = a00 * a12 - a02 * a10; - double b2 = a01 * a12 - a02 * a11; - double b3 = a20; - double b4 = a21; - double b5 = a22; - - double determinant = b0 * b5 - b1 * b4 + b2 * b3; - - if (determinant == 0) { - return false; - } - - double invdet = 1.0 / determinant; - if (invdet > +FLT_MAX || invdet < -FLT_MAX || !isfinitef_((float)invdet)) { - return false; - } - - b0 *= invdet; - b1 *= invdet; - b2 *= invdet; - b3 *= invdet; - b4 *= invdet; - b5 *= invdet; - - dst->vals[0][0] = (float)(a11 * b5 - a12 * b4); - dst->vals[1][0] = (float)(a02 * b4 - a01 * b5); - dst->vals[2][0] = (float)(+b2); - dst->vals[0][1] = (float)(a12 * b3 - a10 * b5); - dst->vals[1][1] = (float)(a00 * b5 - a02 * b3); - dst->vals[2][1] = (float)(-b1); - dst->vals[0][2] = (float)(a10 * b4 - a11 * b3); - dst->vals[1][2] = (float)(a01 * b3 - a00 * b4); - dst->vals[2][2] = (float)(+b0); - - for (int r = 0; r < 3; ++r) - for (int c = 0; c < 3; ++c) { - if (!isfinitef_(dst->vals[r][c])) { - return false; - } - } - return true; -} - -static Matrix3x3 Matrix3x3_concat(const Matrix3x3* A, const Matrix3x3* B) { - Matrix3x3 m = {{{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}}; - for (int r = 0; r < 3; r++) - for (int c = 0; c < 3; c++) { - m.vals[r][c] = A->vals[r][0] * B->vals[0][c] + A->vals[r][1] * B->vals[1][c] + - A->vals[r][2] * B->vals[2][c]; - } - return m; -} - -static void float_XYZD50_to_grid16_lab(const float* xyz_float, uint8_t* grid16_lab) { - float v[3] = { - xyz_float[0] / kD50_x, - xyz_float[1] / kD50_y, - xyz_float[2] / kD50_z, - }; - for (size_t i = 0; i < 3; ++i) { - v[i] = v[i] > 0.008856f ? cbrtf(v[i]) : v[i] * 7.787f + (16 / 116.0f); - } - const float L = v[1] * 116.0f - 16.0f; - const float a = (v[0] - v[1]) * 500.0f; - const float b = (v[1] - v[2]) * 200.0f; - const float Lab_unorm[3] = { - L * (1 / 100.f), - (a + 128.0f) * (1 / 255.0f), - (b + 128.0f) * (1 / 255.0f), - }; - // This will encode L=1 as 0xFFFF. This matches how skcms will interpret the - // table, but the spec appears to indicate that the value should be 0xFF00. 
- // https://crbug.com/skia/13807 - for (size_t i = 0; i < 3; ++i) { - reinterpret_cast(grid16_lab)[i] = - Endian_SwapBE16(float_round_to_unorm16(Lab_unorm[i])); - } -} - -std::string IccHelper::get_desc_string(const ultrahdr_transfer_function tf, - const ultrahdr_color_gamut gamut) { - std::string result; - switch (gamut) { - case ULTRAHDR_COLORGAMUT_BT709: - result += "sRGB"; - break; - case ULTRAHDR_COLORGAMUT_P3: - result += "Display P3"; - break; - case ULTRAHDR_COLORGAMUT_BT2100: - result += "Rec2020"; - break; - default: - result += "Unknown"; - break; - } - result += " Gamut with "; - switch (tf) { - case ULTRAHDR_TF_SRGB: - result += "sRGB"; - break; - case ULTRAHDR_TF_LINEAR: - result += "Linear"; - break; - case ULTRAHDR_TF_PQ: - result += "PQ"; - break; - case ULTRAHDR_TF_HLG: - result += "HLG"; - break; - default: - result += "Unknown"; - break; - } - result += " Transfer"; - return result; -} - -std::shared_ptr IccHelper::write_text_tag(const char* text) { - uint32_t text_length = strlen(text); - uint32_t header[] = { - Endian_SwapBE32(kTAG_TextType), // Type signature - 0, // Reserved - Endian_SwapBE32(1), // Number of records - Endian_SwapBE32(12), // Record size (must be 12) - Endian_SwapBE32(SetFourByteTag('e', 'n', 'U', 'S')), // English USA - Endian_SwapBE32(2 * text_length), // Length of string in bytes - Endian_SwapBE32(28), // Offset of string - }; - - uint32_t total_length = text_length * 2 + sizeof(header); - total_length = (((total_length + 2) >> 2) << 2); // 4 aligned - std::shared_ptr dataStruct = std::make_shared(total_length); - - if (!dataStruct->write(header, sizeof(header))) { - ALOGE("write_text_tag(): error in writing data"); - return dataStruct; - } - - for (size_t i = 0; i < text_length; i++) { - // Convert ASCII to big-endian UTF-16. 
- dataStruct->write8(0); - dataStruct->write8(text[i]); - } - - return dataStruct; -} - -std::shared_ptr IccHelper::write_xyz_tag(float x, float y, float z) { - uint32_t data[] = { - Endian_SwapBE32(kXYZ_PCSSpace), - 0, - static_cast(Endian_SwapBE32(float_round_to_fixed(x))), - static_cast(Endian_SwapBE32(float_round_to_fixed(y))), - static_cast(Endian_SwapBE32(float_round_to_fixed(z))), - }; - std::shared_ptr dataStruct = std::make_shared(sizeof(data)); - dataStruct->write(&data, sizeof(data)); - return dataStruct; -} - -std::shared_ptr IccHelper::write_trc_tag(const int table_entries, - const void* table_16) { - int total_length = 4 + 4 + 4 + table_entries * 2; - total_length = (((total_length + 2) >> 2) << 2); // 4 aligned - std::shared_ptr dataStruct = std::make_shared(total_length); - dataStruct->write32(Endian_SwapBE32(kTAG_CurveType)); // Type - dataStruct->write32(0); // Reserved - dataStruct->write32(Endian_SwapBE32(table_entries)); // Value count - for (int i = 0; i < table_entries; ++i) { - uint16_t value = reinterpret_cast(table_16)[i]; - dataStruct->write16(value); - } - return dataStruct; -} - -std::shared_ptr IccHelper::write_trc_tag(const TransferFunction& fn) { - if (fn.a == 1.f && fn.b == 0.f && fn.c == 0.f && fn.d == 0.f && fn.e == 0.f && fn.f == 0.f) { - int total_length = 16; - std::shared_ptr dataStruct = std::make_shared(total_length); - dataStruct->write32(Endian_SwapBE32(kTAG_ParaCurveType)); // Type - dataStruct->write32(0); // Reserved - dataStruct->write32(Endian_SwapBE16(kExponential_ParaCurveType)); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.g))); - return dataStruct; - } - - int total_length = 40; - std::shared_ptr dataStruct = std::make_shared(total_length); - dataStruct->write32(Endian_SwapBE32(kTAG_ParaCurveType)); // Type - dataStruct->write32(0); // Reserved - dataStruct->write32(Endian_SwapBE16(kGABCDEF_ParaCurveType)); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.g))); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.a))); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.b))); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.c))); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.d))); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.e))); - dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.f))); - return dataStruct; -} - -float IccHelper::compute_tone_map_gain(const ultrahdr_transfer_function tf, float L) { - if (L <= 0.f) { - return 1.f; - } - if (tf == ULTRAHDR_TF_PQ) { - // The PQ transfer function will map to the range [0, 1]. Linearly scale - // it up to the range [0, 10,000/203]. We will then tone map that back - // down to [0, 1]. - constexpr float kInputMaxLuminance = 10000 / 203.f; - constexpr float kOutputMaxLuminance = 1.0; - L *= kInputMaxLuminance; - - // Compute the tone map gain which will tone map from 10,000/203 to 1.0. - constexpr float kToneMapA = kOutputMaxLuminance / (kInputMaxLuminance * kInputMaxLuminance); - constexpr float kToneMapB = 1.f / kOutputMaxLuminance; - return kInputMaxLuminance * (1.f + kToneMapA * L) / (1.f + kToneMapB * L); - } - if (tf == ULTRAHDR_TF_HLG) { - // Let Lw be the brightness of the display in nits. 
- constexpr float Lw = 203.f; - const float gamma = 1.2f + 0.42f * std::log(Lw / 1000.f) / std::log(10.f); - return std::pow(L, gamma - 1.f); - } - return 1.f; -} - -std::shared_ptr IccHelper::write_cicp_tag(uint32_t color_primaries, - uint32_t transfer_characteristics) { - int total_length = 12; // 4 + 4 + 1 + 1 + 1 + 1 - std::shared_ptr dataStruct = std::make_shared(total_length); - dataStruct->write32(Endian_SwapBE32(kTAG_cicp)); // Type signature - dataStruct->write32(0); // Reserved - dataStruct->write8(color_primaries); // Color primaries - dataStruct->write8(transfer_characteristics); // Transfer characteristics - dataStruct->write8(0); // RGB matrix - dataStruct->write8(1); // Full range - return dataStruct; -} - -void IccHelper::compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]) { - // Compute the matrices to convert from source to Rec2020, and from Rec2020 to XYZD50. - Matrix3x3 src_to_rec2020; - const Matrix3x3 rec2020_to_XYZD50 = kRec2020; - { - Matrix3x3 XYZD50_to_rec2020; - Matrix3x3_invert(&rec2020_to_XYZD50, &XYZD50_to_rec2020); - src_to_rec2020 = Matrix3x3_concat(&XYZD50_to_rec2020, &src_to_XYZD50); - } - - // Convert the source signal to linear. - for (size_t i = 0; i < kNumChannels; ++i) { - rgb[i] = pqOetf(rgb[i]); - } - - // Convert source gamut to Rec2020. - Matrix3x3_apply(&src_to_rec2020, rgb); - - // Compute the luminance of the signal. - float L = bt2100Luminance({{{rgb[0], rgb[1], rgb[2]}}}); - - // Compute the tone map gain based on the luminance. - float tone_map_gain = compute_tone_map_gain(ULTRAHDR_TF_PQ, L); - - // Apply the tone map gain. - for (size_t i = 0; i < kNumChannels; ++i) { - rgb[i] *= tone_map_gain; - } - - // Convert from Rec2020-linear to XYZD50. - Matrix3x3_apply(&rec2020_to_XYZD50, rgb); -} - -std::shared_ptr IccHelper::write_clut(const uint8_t* grid_points, - const uint8_t* grid_16) { - uint32_t value_count = kNumChannels; - for (uint32_t i = 0; i < kNumChannels; ++i) { - value_count *= grid_points[i]; - } - - int total_length = 20 + 2 * value_count; - total_length = (((total_length + 2) >> 2) << 2); // 4 aligned - std::shared_ptr dataStruct = std::make_shared(total_length); - - for (size_t i = 0; i < 16; ++i) { - dataStruct->write8(i < kNumChannels ? grid_points[i] : 0); // Grid size - } - dataStruct->write8(2); // Grid byte width (always 16-bit) - dataStruct->write8(0); // Reserved - dataStruct->write8(0); // Reserved - dataStruct->write8(0); // Reserved - - for (uint32_t i = 0; i < value_count; ++i) { - uint16_t value = reinterpret_cast(grid_16)[i]; - dataStruct->write16(value); - } - - return dataStruct; -} - -std::shared_ptr IccHelper::write_mAB_or_mBA_tag(uint32_t type, bool has_a_curves, - const uint8_t* grid_points, - const uint8_t* grid_16) { - const size_t b_curves_offset = 32; - std::shared_ptr b_curves_data[kNumChannels]; - std::shared_ptr a_curves_data[kNumChannels]; - size_t clut_offset = 0; - std::shared_ptr clut; - size_t a_curves_offset = 0; - - // The "B" curve is required. - for (size_t i = 0; i < kNumChannels; ++i) { - b_curves_data[i] = write_trc_tag(kLinear_TransFun); - } - - // The "A" curve and CLUT are optional. 
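// A standalone sketch of the offset bookkeeping that follows, using the sizes this
// file actually emits: a linear 'para' curve from write_trc_tag() is 16 bytes and the
// 17x17x17 CLUT is 20 + 2 * 3 * 17^3 bytes, 4-byte aligned. Names are illustrative.
#include <cstdio>

int main() {
  const int kHeaderSize = 32;       // fixed mAB/mBA header, i.e. the B-curve offset
  const int kLinearCurveSize = 16;  // parametric curve emitted for kLinear_TransFun
  const int kGrid = 17, kChannels = 3;
  int clutSize = 20 + 2 * kChannels * kGrid * kGrid * kGrid;            // 29498
  clutSize = ((clutSize + 2) >> 2) << 2;                                // 4-byte aligned -> 29500
  const int clutOffset = kHeaderSize + kChannels * kLinearCurveSize;    // 80
  const int aCurvesOffset = clutOffset + clutSize;                      // 29580
  printf("CLUT at %d, A curves at %d\n", clutOffset, aCurvesOffset);
  return 0;
}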
- if (has_a_curves) { - clut_offset = b_curves_offset; - for (size_t i = 0; i < kNumChannels; ++i) { - clut_offset += b_curves_data[i]->getLength(); - } - clut = write_clut(grid_points, grid_16); - - a_curves_offset = clut_offset + clut->getLength(); - for (size_t i = 0; i < kNumChannels; ++i) { - a_curves_data[i] = write_trc_tag(kLinear_TransFun); - } - } - - int total_length = b_curves_offset; - for (size_t i = 0; i < kNumChannels; ++i) { - total_length += b_curves_data[i]->getLength(); - } - if (has_a_curves) { - total_length += clut->getLength(); - for (size_t i = 0; i < kNumChannels; ++i) { - total_length += a_curves_data[i]->getLength(); - } - } - std::shared_ptr dataStruct = std::make_shared(total_length); - dataStruct->write32(Endian_SwapBE32(type)); // Type signature - dataStruct->write32(0); // Reserved - dataStruct->write8(kNumChannels); // Input channels - dataStruct->write8(kNumChannels); // Output channels - dataStruct->write16(0); // Reserved - dataStruct->write32(Endian_SwapBE32(b_curves_offset)); // B curve offset - dataStruct->write32(Endian_SwapBE32(0)); // Matrix offset (ignored) - dataStruct->write32(Endian_SwapBE32(0)); // M curve offset (ignored) - dataStruct->write32(Endian_SwapBE32(clut_offset)); // CLUT offset - dataStruct->write32(Endian_SwapBE32(a_curves_offset)); // A curve offset - for (size_t i = 0; i < kNumChannels; ++i) { - if (dataStruct->write(b_curves_data[i]->getData(), b_curves_data[i]->getLength())) { - return dataStruct; - } - } - if (has_a_curves) { - dataStruct->write(clut->getData(), clut->getLength()); - for (size_t i = 0; i < kNumChannels; ++i) { - dataStruct->write(a_curves_data[i]->getData(), a_curves_data[i]->getLength()); - } - } - return dataStruct; -} - -std::shared_ptr IccHelper::writeIccProfile(ultrahdr_transfer_function tf, - ultrahdr_color_gamut gamut) { - ICCHeader header; - - std::vector>> tags; - - // Compute profile description tag - std::string desc = get_desc_string(tf, gamut); - - tags.emplace_back(kTAG_desc, write_text_tag(desc.c_str())); - - Matrix3x3 toXYZD50; - switch (gamut) { - case ULTRAHDR_COLORGAMUT_BT709: - toXYZD50 = kSRGB; - break; - case ULTRAHDR_COLORGAMUT_P3: - toXYZD50 = kDisplayP3; - break; - case ULTRAHDR_COLORGAMUT_BT2100: - toXYZD50 = kRec2020; - break; - default: - // Should not fall here. - return nullptr; - } - - // Compute primaries. - { - tags.emplace_back(kTAG_rXYZ, - write_xyz_tag(toXYZD50.vals[0][0], toXYZD50.vals[1][0], toXYZD50.vals[2][0])); - tags.emplace_back(kTAG_gXYZ, - write_xyz_tag(toXYZD50.vals[0][1], toXYZD50.vals[1][1], toXYZD50.vals[2][1])); - tags.emplace_back(kTAG_bXYZ, - write_xyz_tag(toXYZD50.vals[0][2], toXYZD50.vals[1][2], toXYZD50.vals[2][2])); - } - - // Compute white point tag (must be D50) - tags.emplace_back(kTAG_wtpt, write_xyz_tag(kD50_x, kD50_y, kD50_z)); - - // Compute transfer curves. 
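// A standalone sketch of the unorm16 encoding used to fill the HLG TRC table below
// (the same rounding as float_round_to_unorm16 / float_to_table16); each of the 65
// table entries is then stored big-endian, two bytes per entry.
#include <cstdint>
#include <cstdio>

static uint16_t toUnorm16(float x) {
  float v = x * 65535.f + 0.5f;
  if (v > 65535.f) return 65535;
  if (v < 0.f) return 0;
  return static_cast<uint16_t>(v);
}

int main() {
  printf("0.0 -> %u, 0.5 -> %u, 1.0 -> %u\n", toUnorm16(0.f), toUnorm16(0.5f), toUnorm16(1.f));
  return 0;
}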
- if (tf != ULTRAHDR_TF_PQ) { - if (tf == ULTRAHDR_TF_HLG) { - std::vector trc_table; - trc_table.resize(kTrcTableSize * 2); - for (uint32_t i = 0; i < kTrcTableSize; ++i) { - float x = i / (kTrcTableSize - 1.f); - float y = hlgOetf(x); - y *= compute_tone_map_gain(tf, y); - float_to_table16(y, &trc_table[2 * i]); - } - - tags.emplace_back(kTAG_rTRC, - write_trc_tag(kTrcTableSize, reinterpret_cast(trc_table.data()))); - tags.emplace_back(kTAG_gTRC, - write_trc_tag(kTrcTableSize, reinterpret_cast(trc_table.data()))); - tags.emplace_back(kTAG_bTRC, - write_trc_tag(kTrcTableSize, reinterpret_cast(trc_table.data()))); - } else { - tags.emplace_back(kTAG_rTRC, write_trc_tag(kSRGB_TransFun)); - tags.emplace_back(kTAG_gTRC, write_trc_tag(kSRGB_TransFun)); - tags.emplace_back(kTAG_bTRC, write_trc_tag(kSRGB_TransFun)); - } - } - - // Compute CICP. - if (tf == ULTRAHDR_TF_HLG || tf == ULTRAHDR_TF_PQ) { - // The CICP tag is present in ICC 4.4, so update the header's version. - header.version = Endian_SwapBE32(0x04400000); - - uint32_t color_primaries = 0; - if (gamut == ULTRAHDR_COLORGAMUT_BT709) { - color_primaries = kCICPPrimariesSRGB; - } else if (gamut == ULTRAHDR_COLORGAMUT_P3) { - color_primaries = kCICPPrimariesP3; - } - - uint32_t transfer_characteristics = 0; - if (tf == ULTRAHDR_TF_SRGB) { - transfer_characteristics = kCICPTrfnSRGB; - } else if (tf == ULTRAHDR_TF_LINEAR) { - transfer_characteristics = kCICPTrfnLinear; - } else if (tf == ULTRAHDR_TF_PQ) { - transfer_characteristics = kCICPTrfnPQ; - } else if (tf == ULTRAHDR_TF_HLG) { - transfer_characteristics = kCICPTrfnHLG; - } - tags.emplace_back(kTAG_cicp, write_cicp_tag(color_primaries, transfer_characteristics)); - } - - // Compute A2B0. - if (tf == ULTRAHDR_TF_PQ) { - std::vector a2b_grid; - a2b_grid.resize(kGridSize * kGridSize * kGridSize * kNumChannels * 2); - size_t a2b_grid_index = 0; - for (uint32_t r_index = 0; r_index < kGridSize; ++r_index) { - for (uint32_t g_index = 0; g_index < kGridSize; ++g_index) { - for (uint32_t b_index = 0; b_index < kGridSize; ++b_index) { - float rgb[3] = { - r_index / (kGridSize - 1.f), - g_index / (kGridSize - 1.f), - b_index / (kGridSize - 1.f), - }; - compute_lut_entry(toXYZD50, rgb); - float_XYZD50_to_grid16_lab(rgb, &a2b_grid[a2b_grid_index]); - a2b_grid_index += 6; - } - } - } - const uint8_t* grid_16 = reinterpret_cast(a2b_grid.data()); - - uint8_t grid_points[kNumChannels]; - for (size_t i = 0; i < kNumChannels; ++i) { - grid_points[i] = kGridSize; - } - - auto a2b_data = write_mAB_or_mBA_tag(kTAG_mABType, - /* has_a_curves */ true, grid_points, grid_16); - tags.emplace_back(kTAG_A2B0, std::move(a2b_data)); - } - - // Compute B2A0. - if (tf == ULTRAHDR_TF_PQ) { - auto b2a_data = write_mAB_or_mBA_tag(kTAG_mBAType, - /* has_a_curves */ false, - /* grid_points */ nullptr, - /* grid_16 */ nullptr); - tags.emplace_back(kTAG_B2A0, std::move(b2a_data)); - } - - // Compute copyright tag - tags.emplace_back(kTAG_cprt, write_text_tag("Google Inc. 2022")); - - // Compute the size of the profile. 
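// A standalone sketch of the size arithmetic that follows: header (128) + tag count (4),
// one 12-byte table entry per tag, then the concatenated tag data. The tag sizes listed
// here are placeholders, not the real serialized lengths.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t kHeaderSize = 132;   // kICCHeaderSize: ICC header plus tag count
  const size_t kTagEntrySize = 12;  // kICCTagTableEntrySize: signature, offset, size
  std::vector<size_t> tagSizes = {64, 20, 20, 20, 20, 32, 32, 32, 28};  // illustrative only
  size_t dataSize = 0;
  for (size_t s : tagSizes) dataSize += s;
  const size_t profileSize = kHeaderSize + kTagEntrySize * tagSizes.size() + dataSize;
  const size_t firstTagOffset = kHeaderSize + kTagEntrySize * tagSizes.size();
  printf("profile = %zu bytes, first tag data at offset %zu\n", profileSize, firstTagOffset);
  return 0;
}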
- size_t tag_data_size = 0; - for (const auto& tag : tags) { - tag_data_size += tag.second->getLength(); - } - size_t tag_table_size = kICCTagTableEntrySize * tags.size(); - size_t profile_size = kICCHeaderSize + tag_table_size + tag_data_size; - - std::shared_ptr dataStruct = - std::make_shared(profile_size + kICCIdentifierSize); - - // Write identifier, chunk count, and chunk ID - if (!dataStruct->write(kICCIdentifier, sizeof(kICCIdentifier)) || !dataStruct->write8(1) || - !dataStruct->write8(1)) { - ALOGE("writeIccProfile(): error in identifier"); - return dataStruct; - } - - // Write the header. - header.data_color_space = Endian_SwapBE32(Signature_RGB); - header.pcs = Endian_SwapBE32(tf == ULTRAHDR_TF_PQ ? Signature_Lab : Signature_XYZ); - header.size = Endian_SwapBE32(profile_size); - header.tag_count = Endian_SwapBE32(tags.size()); - - if (!dataStruct->write(&header, sizeof(header))) { - ALOGE("writeIccProfile(): error in header"); - return dataStruct; - } - - // Write the tag table. Track the offset and size of the previous tag to - // compute each tag's offset. An empty SkData indicates that the previous - // tag is to be reused. - uint32_t last_tag_offset = sizeof(header) + tag_table_size; - uint32_t last_tag_size = 0; - for (const auto& tag : tags) { - last_tag_offset = last_tag_offset + last_tag_size; - last_tag_size = tag.second->getLength(); - uint32_t tag_table_entry[3] = { - Endian_SwapBE32(tag.first), - Endian_SwapBE32(last_tag_offset), - Endian_SwapBE32(last_tag_size), - }; - if (!dataStruct->write(tag_table_entry, sizeof(tag_table_entry))) { - ALOGE("writeIccProfile(): error in writing tag table"); - return dataStruct; - } - } - - // Write the tags. - for (const auto& tag : tags) { - if (!dataStruct->write(tag.second->getData(), tag.second->getLength())) { - ALOGE("writeIccProfile(): error in writing tags"); - return dataStruct; - } - } - - return dataStruct; -} - -bool IccHelper::tagsEqualToMatrix(const Matrix3x3& matrix, const uint8_t* red_tag, - const uint8_t* green_tag, const uint8_t* blue_tag) { - std::shared_ptr red_tag_test = - write_xyz_tag(matrix.vals[0][0], matrix.vals[1][0], matrix.vals[2][0]); - std::shared_ptr green_tag_test = - write_xyz_tag(matrix.vals[0][1], matrix.vals[1][1], matrix.vals[2][1]); - std::shared_ptr blue_tag_test = - write_xyz_tag(matrix.vals[0][2], matrix.vals[1][2], matrix.vals[2][2]); - return memcmp(red_tag, red_tag_test->getData(), kColorantTagSize) == 0 && - memcmp(green_tag, green_tag_test->getData(), kColorantTagSize) == 0 && - memcmp(blue_tag, blue_tag_test->getData(), kColorantTagSize) == 0; -} - -ultrahdr_color_gamut IccHelper::readIccColorGamut(void* icc_data, size_t icc_size) { - // Each tag table entry consists of 3 fields of 4 bytes each. - static const size_t kTagTableEntrySize = 12; - - if (icc_data == nullptr || icc_size < sizeof(ICCHeader) + kICCIdentifierSize) { - return ULTRAHDR_COLORGAMUT_UNSPECIFIED; - } - - if (memcmp(icc_data, kICCIdentifier, sizeof(kICCIdentifier)) != 0) { - return ULTRAHDR_COLORGAMUT_UNSPECIFIED; - } - - uint8_t* icc_bytes = reinterpret_cast(icc_data) + kICCIdentifierSize; - - ICCHeader* header = reinterpret_cast(icc_bytes); - - // Use 0 to indicate not found, since offsets are always relative to start - // of ICC data and therefore a tag offset of zero would never be valid. 
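// A standalone sketch of decoding one 12-byte tag-table entry the way the scan below
// does (assuming a little-endian host, hence the explicit byte swap); the entry bytes
// are illustrative.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t swapBE32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) | (v << 24);
}

int main() {
  const uint8_t entry[12] = {'r', 'X', 'Y', 'Z', 0, 0, 0, 240, 0, 0, 0, 20};  // sig, offset, size
  uint32_t sig, offset, size;
  std::memcpy(&sig, entry + 0, 4);
  std::memcpy(&offset, entry + 4, 4);
  std::memcpy(&size, entry + 8, 4);
  printf("sig=0x%08X offset=%u size=%u\n", swapBE32(sig), swapBE32(offset), swapBE32(size));
  return 0;
}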
- size_t red_primary_offset = 0, green_primary_offset = 0, blue_primary_offset = 0; - size_t red_primary_size = 0, green_primary_size = 0, blue_primary_size = 0; - for (size_t tag_idx = 0; tag_idx < Endian_SwapBE32(header->tag_count); ++tag_idx) { - uint32_t* tag_entry_start = - reinterpret_cast(icc_bytes + sizeof(ICCHeader) + tag_idx * kTagTableEntrySize); - // first 4 bytes are the tag signature, next 4 bytes are the tag offset, - // last 4 bytes are the tag length in bytes. - if (red_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_rXYZ)) { - red_primary_offset = Endian_SwapBE32(*(tag_entry_start + 1)); - red_primary_size = Endian_SwapBE32(*(tag_entry_start + 2)); - } else if (green_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_gXYZ)) { - green_primary_offset = Endian_SwapBE32(*(tag_entry_start + 1)); - green_primary_size = Endian_SwapBE32(*(tag_entry_start + 2)); - } else if (blue_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_bXYZ)) { - blue_primary_offset = Endian_SwapBE32(*(tag_entry_start + 1)); - blue_primary_size = Endian_SwapBE32(*(tag_entry_start + 2)); - } - } - - if (red_primary_offset == 0 || red_primary_size != kColorantTagSize || - kICCIdentifierSize + red_primary_offset + red_primary_size > icc_size || - green_primary_offset == 0 || green_primary_size != kColorantTagSize || - kICCIdentifierSize + green_primary_offset + green_primary_size > icc_size || - blue_primary_offset == 0 || blue_primary_size != kColorantTagSize || - kICCIdentifierSize + blue_primary_offset + blue_primary_size > icc_size) { - return ULTRAHDR_COLORGAMUT_UNSPECIFIED; - } - - uint8_t* red_tag = icc_bytes + red_primary_offset; - uint8_t* green_tag = icc_bytes + green_primary_offset; - uint8_t* blue_tag = icc_bytes + blue_primary_offset; - - // Serialize tags as we do on encode and compare what we find to that to - // determine the gamut (since we don't have a need yet for full deserialize). - if (tagsEqualToMatrix(kSRGB, red_tag, green_tag, blue_tag)) { - return ULTRAHDR_COLORGAMUT_BT709; - } else if (tagsEqualToMatrix(kDisplayP3, red_tag, green_tag, blue_tag)) { - return ULTRAHDR_COLORGAMUT_P3; - } else if (tagsEqualToMatrix(kRec2020, red_tag, green_tag, blue_tag)) { - return ULTRAHDR_COLORGAMUT_BT2100; - } - - // Didn't find a match to one of the profiles we write; indicate the gamut - // is unspecified since we don't understand it. - return ULTRAHDR_COLORGAMUT_UNSPECIFIED; -} - -} // namespace ultrahdr diff --git a/lib/icc.h b/lib/icc.h deleted file mode 100644 index a0b4680..0000000 --- a/lib/icc.h +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ULTRAHDR_ICC_H -#define ULTRAHDR_ICC_H - -#include - -#ifndef USE_BIG_ENDIAN_IN_ICC -#define USE_BIG_ENDIAN_IN_ICC true -#endif - -#undef Endian_SwapBE32 -#undef Endian_SwapBE16 -#if USE_BIG_ENDIAN_IN_ICC -#define Endian_SwapBE32(n) EndianSwap32(n) -#define Endian_SwapBE16(n) EndianSwap16(n) -#else -#define Endian_SwapBE32(n) (n) -#define Endian_SwapBE16(n) (n) -#endif - -#include "ultrahdr.h" -#include "jpegr.h" -#include "gainmapmath.h" -#include "jpegrutils.h" - -namespace ultrahdr { - -typedef int32_t Fixed; -#define Fixed1 (1 << 16) -#define MaxS32FitsInFloat 2147483520 -#define MinS32FitsInFloat (-MaxS32FitsInFloat) -#define FixedToFloat(x) ((x)*1.52587890625e-5f) - -typedef struct Matrix3x3 { - float vals[3][3]; -} Matrix3x3; - -// The D50 illuminant. -constexpr float kD50_x = 0.9642f; -constexpr float kD50_y = 1.0000f; -constexpr float kD50_z = 0.8249f; - -enum { - // data_color_space - Signature_CMYK = 0x434D594B, - Signature_Gray = 0x47524159, - Signature_RGB = 0x52474220, - - // pcs - Signature_Lab = 0x4C616220, - Signature_XYZ = 0x58595A20, -}; - -typedef uint32_t FourByteTag; -static inline constexpr FourByteTag SetFourByteTag(char a, char b, char c, char d) { - return (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d); -} - -static constexpr char kICCIdentifier[] = "ICC_PROFILE"; -// 12 for the actual identifier, +2 for the chunk count and chunk index which -// will always follow. -static constexpr size_t kICCIdentifierSize = 14; - -// This is equal to the header size according to the ICC specification (128) -// plus the size of the tag count (4). We include the tag count since we -// always require it to be present anyway. -static constexpr size_t kICCHeaderSize = 132; - -// Contains a signature (4), offset (4), and size (4). -static constexpr size_t kICCTagTableEntrySize = 12; - -// size should be 20; 4 bytes for type descriptor, 4 bytes reserved, 12 -// bytes for a single XYZ number type (4 bytes per coordinate). 
-static constexpr size_t kColorantTagSize = 20; - -static constexpr uint32_t kDisplay_Profile = SetFourByteTag('m', 'n', 't', 'r'); -static constexpr uint32_t kRGB_ColorSpace = SetFourByteTag('R', 'G', 'B', ' '); -static constexpr uint32_t kXYZ_PCSSpace = SetFourByteTag('X', 'Y', 'Z', ' '); -static constexpr uint32_t kACSP_Signature = SetFourByteTag('a', 'c', 's', 'p'); - -static constexpr uint32_t kTAG_desc = SetFourByteTag('d', 'e', 's', 'c'); -static constexpr uint32_t kTAG_TextType = SetFourByteTag('m', 'l', 'u', 'c'); -static constexpr uint32_t kTAG_rXYZ = SetFourByteTag('r', 'X', 'Y', 'Z'); -static constexpr uint32_t kTAG_gXYZ = SetFourByteTag('g', 'X', 'Y', 'Z'); -static constexpr uint32_t kTAG_bXYZ = SetFourByteTag('b', 'X', 'Y', 'Z'); -static constexpr uint32_t kTAG_wtpt = SetFourByteTag('w', 't', 'p', 't'); -static constexpr uint32_t kTAG_rTRC = SetFourByteTag('r', 'T', 'R', 'C'); -static constexpr uint32_t kTAG_gTRC = SetFourByteTag('g', 'T', 'R', 'C'); -static constexpr uint32_t kTAG_bTRC = SetFourByteTag('b', 'T', 'R', 'C'); -static constexpr uint32_t kTAG_cicp = SetFourByteTag('c', 'i', 'c', 'p'); -static constexpr uint32_t kTAG_cprt = SetFourByteTag('c', 'p', 'r', 't'); -static constexpr uint32_t kTAG_A2B0 = SetFourByteTag('A', '2', 'B', '0'); -static constexpr uint32_t kTAG_B2A0 = SetFourByteTag('B', '2', 'A', '0'); - -static constexpr uint32_t kTAG_CurveType = SetFourByteTag('c', 'u', 'r', 'v'); -static constexpr uint32_t kTAG_mABType = SetFourByteTag('m', 'A', 'B', ' '); -static constexpr uint32_t kTAG_mBAType = SetFourByteTag('m', 'B', 'A', ' '); -static constexpr uint32_t kTAG_ParaCurveType = SetFourByteTag('p', 'a', 'r', 'a'); - -static constexpr Matrix3x3 kSRGB = {{ - // ICC fixed-point (16.16) representation, taken from skcms. Please keep them exactly in sync. - // 0.436065674f, 0.385147095f, 0.143066406f, - // 0.222488403f, 0.716873169f, 0.060607910f, - // 0.013916016f, 0.097076416f, 0.714096069f, - {FixedToFloat(0x6FA2), FixedToFloat(0x6299), FixedToFloat(0x24A0)}, - {FixedToFloat(0x38F5), FixedToFloat(0xB785), FixedToFloat(0x0F84)}, - {FixedToFloat(0x0390), FixedToFloat(0x18DA), FixedToFloat(0xB6CF)}, -}}; - -static constexpr Matrix3x3 kDisplayP3 = {{ - {0.515102f, 0.291965f, 0.157153f}, - {0.241182f, 0.692236f, 0.0665819f}, - {-0.00104941f, 0.0418818f, 0.784378f}, -}}; - -static constexpr Matrix3x3 kRec2020 = {{ - {0.673459f, 0.165661f, 0.125100f}, - {0.279033f, 0.675338f, 0.0456288f}, - {-0.00193139f, 0.0299794f, 0.797162f}, -}}; - -static constexpr uint32_t kCICPPrimariesSRGB = 1; -static constexpr uint32_t kCICPPrimariesP3 = 12; -static constexpr uint32_t kCICPPrimariesRec2020 = 9; - -static constexpr uint32_t kCICPTrfnSRGB = 1; -static constexpr uint32_t kCICPTrfnLinear = 8; -static constexpr uint32_t kCICPTrfnPQ = 16; -static constexpr uint32_t kCICPTrfnHLG = 18; - -enum ParaCurveType { - kExponential_ParaCurveType = 0, - kGAB_ParaCurveType = 1, - kGABC_ParaCurveType = 2, - kGABDE_ParaCurveType = 3, - kGABCDEF_ParaCurveType = 4, -}; - -/** - * Return the closest int for the given float. Returns MaxS32FitsInFloat for NaN. - */ -static inline int float_saturate2int(float x) { - x = x < MaxS32FitsInFloat ? x : MaxS32FitsInFloat; - x = x > MinS32FitsInFloat ? 
x : MinS32FitsInFloat; - return (int)x; -} - -static Fixed float_round_to_fixed(float x) { - return float_saturate2int((float)floor((double)x * Fixed1 + 0.5)); -} - -static uint16_t float_round_to_unorm16(float x) { - x = x * 65535.f + 0.5; - if (x > 65535) return 65535; - if (x < 0) return 0; - return static_cast(x); -} - -static inline void float_to_table16(const float f, uint8_t* table_16) { - *reinterpret_cast(table_16) = Endian_SwapBE16(float_round_to_unorm16(f)); -} - -static inline bool isfinitef_(float x) { return 0 == x * 0; } - -struct ICCHeader { - // Size of the profile (computed) - uint32_t size; - // Preferred CMM type (ignored) - uint32_t cmm_type = 0; - // Version 4.3 or 4.4 if CICP is included. - uint32_t version = Endian_SwapBE32(0x04300000); - // Display device profile - uint32_t profile_class = Endian_SwapBE32(kDisplay_Profile); - // RGB input color space; - uint32_t data_color_space = Endian_SwapBE32(kRGB_ColorSpace); - // Profile connection space. - uint32_t pcs = Endian_SwapBE32(kXYZ_PCSSpace); - // Date and time (ignored) - uint8_t creation_date_time[12] = {0}; - // Profile signature - uint32_t signature = Endian_SwapBE32(kACSP_Signature); - // Platform target (ignored) - uint32_t platform = 0; - // Flags: not embedded, can be used independently - uint32_t flags = 0x00000000; - // Device manufacturer (ignored) - uint32_t device_manufacturer = 0; - // Device model (ignored) - uint32_t device_model = 0; - // Device attributes (ignored) - uint8_t device_attributes[8] = {0}; - // Relative colorimetric rendering intent - uint32_t rendering_intent = Endian_SwapBE32(1); - // D50 standard illuminant (X, Y, Z) - uint32_t illuminant_X = Endian_SwapBE32(float_round_to_fixed(kD50_x)); - uint32_t illuminant_Y = Endian_SwapBE32(float_round_to_fixed(kD50_y)); - uint32_t illuminant_Z = Endian_SwapBE32(float_round_to_fixed(kD50_z)); - // Profile creator (ignored) - uint32_t creator = 0; - // Profile id checksum (ignored) - uint8_t profile_id[16] = {0}; - // Reserved (ignored) - uint8_t reserved[28] = {0}; - // Technically not part of header, but required - uint32_t tag_count = 0; -}; - -class IccHelper { - private: - static constexpr uint32_t kTrcTableSize = 65; - static constexpr uint32_t kGridSize = 17; - static constexpr size_t kNumChannels = 3; - - static std::shared_ptr write_text_tag(const char* text); - static std::string get_desc_string(const ultrahdr_transfer_function tf, - const ultrahdr_color_gamut gamut); - static std::shared_ptr write_xyz_tag(float x, float y, float z); - static std::shared_ptr write_trc_tag(const int table_entries, const void* table_16); - static std::shared_ptr write_trc_tag(const TransferFunction& fn); - static float compute_tone_map_gain(const ultrahdr_transfer_function tf, float L); - static std::shared_ptr write_cicp_tag(uint32_t color_primaries, - uint32_t transfer_characteristics); - static std::shared_ptr write_mAB_or_mBA_tag(uint32_t type, bool has_a_curves, - const uint8_t* grid_points, - const uint8_t* grid_16); - static void compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]); - static std::shared_ptr write_clut(const uint8_t* grid_points, const uint8_t* grid_16); - - // Checks if a set of xyz tags is equivalent to a 3x3 Matrix. Each input - // tag buffer assumed to be at least kColorantTagSize in size. 
- static bool tagsEqualToMatrix(const Matrix3x3& matrix, const uint8_t* red_tag, - const uint8_t* green_tag, const uint8_t* blue_tag); - - public: - // Output includes JPEG embedding identifier and chunk information, but not - // APPx information. - static std::shared_ptr writeIccProfile(const ultrahdr_transfer_function tf, - const ultrahdr_color_gamut gamut); - // NOTE: this function is not robust; it can infer gamuts that IccHelper - // writes out but should not be considered a reference implementation for - // robust parsing of ICC profiles or their gamuts. - static ultrahdr_color_gamut readIccColorGamut(void* icc_data, size_t icc_size); -}; -} // namespace ultrahdr - -#endif // ULTRAHDR_ICC_H diff --git a/lib/include/ultrahdr/gainmapmath.h b/lib/include/ultrahdr/gainmapmath.h new file mode 100644 index 0000000..bdbaf02 --- /dev/null +++ b/lib/include/ultrahdr/gainmapmath.h @@ -0,0 +1,498 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_GAINMAPMATH_H +#define ULTRAHDR_GAINMAPMATH_H + +#include + +#include "ultrahdr.h" +#include "jpegr.h" + +#define CLIP3(x, min, max) ((x) < (min)) ? (min) : ((x) > (max)) ? (max) : (x) + +namespace ultrahdr { + +//////////////////////////////////////////////////////////////////////////////// +// Framework + +const float kSdrWhiteNits = 100.0f; +const float kHlgMaxNits = 1000.0f; +const float kPqMaxNits = 10000.0f; + +struct Color { + union { + struct { + float r; + float g; + float b; + }; + struct { + float y; + float u; + float v; + }; + }; +}; + +typedef Color (*ColorTransformFn)(Color); +typedef float (*ColorCalculationFn)(Color); + +// A transfer function mapping encoded values to linear values, +// represented by this 7-parameter piecewise function: +// +// linear = sign(encoded) * (c*|encoded| + f) , 0 <= |encoded| < d +// = sign(encoded) * ((a*|encoded| + b)^g + e), d <= |encoded| +// +// (A simple gamma transfer function sets g to gamma and a to 1.) 
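// A standalone sketch that evaluates the 7-parameter piecewise function described
// above for non-negative inputs, using the sRGB constants defined just below
// (g = 2.4, a = 1/1.055, b = 0.055/1.055, c = 1/12.92, d = 0.04045, e = f = 0).
#include <cmath>
#include <cstdio>

static float evalTransferFn(float g, float a, float b, float c, float d, float e, float f,
                            float x) {
  return x < d ? c * x + f : std::pow(a * x + b, g) + e;  // sign handling omitted
}

int main() {
  const float g = 2.4f, a = 1.f / 1.055f, b = 0.055f / 1.055f, c = 1.f / 12.92f, d = 0.04045f;
  printf("encoded 0.5 -> linear %.4f\n", evalTransferFn(g, a, b, c, d, 0.f, 0.f, 0.5f));  // ~0.214
  return 0;
}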
+typedef struct TransferFunction { + float g, a, b, c, d, e, f; +} TransferFunction; + +static constexpr TransferFunction kSRGB_TransFun = { + 2.4f, (float)(1 / 1.055), (float)(0.055 / 1.055), (float)(1 / 12.92), 0.04045f, 0.0f, 0.0f}; + +static constexpr TransferFunction kLinear_TransFun = {1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}; + +inline Color operator+=(Color& lhs, const Color& rhs) { + lhs.r += rhs.r; + lhs.g += rhs.g; + lhs.b += rhs.b; + return lhs; +} +inline Color operator-=(Color& lhs, const Color& rhs) { + lhs.r -= rhs.r; + lhs.g -= rhs.g; + lhs.b -= rhs.b; + return lhs; +} + +inline Color operator+(const Color& lhs, const Color& rhs) { + Color temp = lhs; + return temp += rhs; +} +inline Color operator-(const Color& lhs, const Color& rhs) { + Color temp = lhs; + return temp -= rhs; +} + +inline Color operator+=(Color& lhs, const float rhs) { + lhs.r += rhs; + lhs.g += rhs; + lhs.b += rhs; + return lhs; +} +inline Color operator-=(Color& lhs, const float rhs) { + lhs.r -= rhs; + lhs.g -= rhs; + lhs.b -= rhs; + return lhs; +} +inline Color operator*=(Color& lhs, const float rhs) { + lhs.r *= rhs; + lhs.g *= rhs; + lhs.b *= rhs; + return lhs; +} +inline Color operator/=(Color& lhs, const float rhs) { + lhs.r /= rhs; + lhs.g /= rhs; + lhs.b /= rhs; + return lhs; +} + +inline Color operator+(const Color& lhs, const float rhs) { + Color temp = lhs; + return temp += rhs; +} +inline Color operator-(const Color& lhs, const float rhs) { + Color temp = lhs; + return temp -= rhs; +} +inline Color operator*(const Color& lhs, const float rhs) { + Color temp = lhs; + return temp *= rhs; +} +inline Color operator/(const Color& lhs, const float rhs) { + Color temp = lhs; + return temp /= rhs; +} + +inline uint16_t floatToHalf(float f) { + // round-to-nearest-even: add last bit after truncated mantissa + const uint32_t b = *((uint32_t*)&f) + 0x00001000; + + const int32_t e = (b & 0x7F800000) >> 23; // exponent + const uint32_t m = b & 0x007FFFFF; // mantissa + + // sign : normalized : denormalized : saturate + return (b & 0x80000000) >> 16 | (e > 112) * ((((e - 112) << 10) & 0x7C00) | m >> 13) | + ((e < 113) & (e > 101)) * ((((0x007FF000 + m) >> (125 - e)) + 1) >> 1) | + (e > 143) * 0x7FFF; +} + +constexpr size_t kGainFactorPrecision = 10; +constexpr size_t kGainFactorNumEntries = 1 << kGainFactorPrecision; +struct GainLUT { + GainLUT(ultrahdr_metadata_ptr metadata) { + for (size_t idx = 0; idx < kGainFactorNumEntries; idx++) { + float value = static_cast(idx) / static_cast(kGainFactorNumEntries - 1); + float logBoost = log2(metadata->minContentBoost) * (1.0f - value) + + log2(metadata->maxContentBoost) * value; + mGainTable[idx] = exp2(logBoost); + } + } + + GainLUT(ultrahdr_metadata_ptr metadata, float displayBoost) { + float boostFactor = displayBoost > 0 ? 
displayBoost / metadata->maxContentBoost : 1.0f; + for (size_t idx = 0; idx < kGainFactorNumEntries; idx++) { + float value = static_cast(idx) / static_cast(kGainFactorNumEntries - 1); + float logBoost = log2(metadata->minContentBoost) * (1.0f - value) + + log2(metadata->maxContentBoost) * value; + mGainTable[idx] = exp2(logBoost * boostFactor); + } + } + + ~GainLUT() {} + + float getGainFactor(float gain) { + uint32_t idx = static_cast(gain * (kGainFactorNumEntries - 1) + 0.5); + // TODO() : Remove once conversion modules have appropriate clamping in place + idx = CLIP3(idx, 0, kGainFactorNumEntries - 1); + return mGainTable[idx]; + } + + private: + float mGainTable[kGainFactorNumEntries]; +}; + +struct ShepardsIDW { + ShepardsIDW(int mapScaleFactor) : mMapScaleFactor{mapScaleFactor} { + const int size = mMapScaleFactor * mMapScaleFactor * 4; + mWeights = new float[size]; + mWeightsNR = new float[size]; + mWeightsNB = new float[size]; + mWeightsC = new float[size]; + fillShepardsIDW(mWeights, 1, 1); + fillShepardsIDW(mWeightsNR, 0, 1); + fillShepardsIDW(mWeightsNB, 1, 0); + fillShepardsIDW(mWeightsC, 0, 0); + } + ~ShepardsIDW() { + delete[] mWeights; + delete[] mWeightsNR; + delete[] mWeightsNB; + delete[] mWeightsC; + } + + int mMapScaleFactor; + // Image :- + // p00 p01 p02 p03 p04 p05 p06 p07 + // p10 p11 p12 p13 p14 p15 p16 p17 + // p20 p21 p22 p23 p24 p25 p26 p27 + // p30 p31 p32 p33 p34 p35 p36 p37 + // p40 p41 p42 p43 p44 p45 p46 p47 + // p50 p51 p52 p53 p54 p55 p56 p57 + // p60 p61 p62 p63 p64 p65 p66 p67 + // p70 p71 p72 p73 p74 p75 p76 p77 + + // Gain Map (for 4 scale factor) :- + // m00 p01 + // m10 m11 + + // Gain sample of curr 4x4, right 4x4, bottom 4x4, bottom right 4x4 are used during + // reconstruction. hence table weight size is 4. + float* mWeights; + // TODO: check if its ok to mWeights at places + float* mWeightsNR; // no right + float* mWeightsNB; // no bottom + float* mWeightsC; // no right & bottom + + float euclideanDistance(float x1, float x2, float y1, float y2); + void fillShepardsIDW(float* weights, int incR, int incB); +}; + +//////////////////////////////////////////////////////////////////////////////// +// sRGB transformations +// NOTE: sRGB has the same color primaries as BT.709, but different transfer +// function. For this reason, all sRGB transformations here apply to BT.709, +// except for those concerning transfer functions. + +/* + * Calculate the luminance of a linear RGB sRGB pixel, according to + * IEC 61966-2-1/Amd 1:2003. + * + * [0.0, 1.0] range in and out. + */ +float srgbLuminance(Color e); + +/* + * Convert from OETF'd srgb RGB to YUV, according to ITU-R BT.709-6. + * + * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace. + */ +Color srgbRgbToYuv(Color e_gamma); + +/* + * Convert from OETF'd srgb YUV to RGB, according to ITU-R BT.709-6. + * + * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace. + */ +Color srgbYuvToRgb(Color e_gamma); + +/* + * Convert from srgb to linear, according to IEC 61966-2-1/Amd 1:2003. + * + * [0.0, 1.0] range in and out. 
+ */ +float srgbInvOetf(float e_gamma); +Color srgbInvOetf(Color e_gamma); +float srgbInvOetfLUT(float e_gamma); +Color srgbInvOetfLUT(Color e_gamma); + +constexpr size_t kSrgbInvOETFPrecision = 10; +constexpr size_t kSrgbInvOETFNumEntries = 1 << kSrgbInvOETFPrecision; + +//////////////////////////////////////////////////////////////////////////////// +// Display-P3 transformations + +/* + * Calculated the luminance of a linear RGB P3 pixel, according to SMPTE EG 432-1. + * + * [0.0, 1.0] range in and out. + */ +float p3Luminance(Color e); + +/* + * Convert from OETF'd P3 RGB to YUV, according to ITU-R BT.601-7. + * + * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace. + */ +Color p3RgbToYuv(Color e_gamma); + +/* + * Convert from OETF'd P3 YUV to RGB, according to ITU-R BT.601-7. + * + * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace. + */ +Color p3YuvToRgb(Color e_gamma); + +//////////////////////////////////////////////////////////////////////////////// +// BT.2100 transformations - according to ITU-R BT.2100-2 + +/* + * Calculate the luminance of a linear RGB BT.2100 pixel. + * + * [0.0, 1.0] range in and out. + */ +float bt2100Luminance(Color e); + +/* + * Convert from OETF'd BT.2100 RGB to YUV, according to ITU-R BT.2100-2. + * + * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace. + */ +Color bt2100RgbToYuv(Color e_gamma); + +/* + * Convert from OETF'd BT.2100 YUV to RGB, according to ITU-R BT.2100-2. + * + * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace. + */ +Color bt2100YuvToRgb(Color e_gamma); + +/* + * Convert from scene luminance to HLG. + * + * [0.0, 1.0] range in and out. + */ +float hlgOetf(float e); +Color hlgOetf(Color e); +float hlgOetfLUT(float e); +Color hlgOetfLUT(Color e); + +constexpr size_t kHlgOETFPrecision = 16; +constexpr size_t kHlgOETFNumEntries = 1 << kHlgOETFPrecision; + +/* + * Convert from HLG to scene luminance. + * + * [0.0, 1.0] range in and out. + */ +float hlgInvOetf(float e_gamma); +Color hlgInvOetf(Color e_gamma); +float hlgInvOetfLUT(float e_gamma); +Color hlgInvOetfLUT(Color e_gamma); + +constexpr size_t kHlgInvOETFPrecision = 12; +constexpr size_t kHlgInvOETFNumEntries = 1 << kHlgInvOETFPrecision; + +/* + * Convert from scene luminance to PQ. + * + * [0.0, 1.0] range in and out. + */ +float pqOetf(float e); +Color pqOetf(Color e); +float pqOetfLUT(float e); +Color pqOetfLUT(Color e); + +constexpr size_t kPqOETFPrecision = 16; +constexpr size_t kPqOETFNumEntries = 1 << kPqOETFPrecision; + +/* + * Convert from PQ to scene luminance in nits. + * + * [0.0, 1.0] range in and out. + */ +float pqInvOetf(float e_gamma); +Color pqInvOetf(Color e_gamma); +float pqInvOetfLUT(float e_gamma); +Color pqInvOetfLUT(Color e_gamma); + +constexpr size_t kPqInvOETFPrecision = 12; +constexpr size_t kPqInvOETFNumEntries = 1 << kPqInvOETFPrecision; + +//////////////////////////////////////////////////////////////////////////////// +// Color space conversions + +/* + * Convert between color spaces with linear RGB data, according to ITU-R BT.2407 and EG 432-1. + * + * All conversions are derived from multiplying the matrix for XYZ to output RGB color gamut by the + * matrix for input RGB color gamut to XYZ. The matrix for converting from XYZ to an RGB gamut is + * always the inverse of the RGB gamut to XYZ matrix. 
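// A standalone sketch of how a gamut-to-gamut matrix is formed as described above:
// (XYZ -> destination RGB) multiplied by (source RGB -> XYZ). The matrices here are
// commonly cited D65 values and are illustrative, not the constants this library uses.
#include <cstdio>

struct Mat3 { float m[3][3]; };

static Mat3 mul(const Mat3& a, const Mat3& b) {
  Mat3 r = {};
  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j)
      for (int k = 0; k < 3; ++k) r.m[i][j] += a.m[i][k] * b.m[k][j];
  return r;
}

int main() {
  const Mat3 bt709ToXyz = {{{0.4124f, 0.3576f, 0.1805f},
                            {0.2126f, 0.7152f, 0.0722f},
                            {0.0193f, 0.1192f, 0.9505f}}};
  const Mat3 xyzToP3 = {{{2.4935f, -0.9314f, -0.4027f},
                         {-0.8295f, 1.7627f, 0.0236f},
                         {0.0358f, -0.0762f, 0.9569f}}};
  const Mat3 bt709ToP3 = mul(xyzToP3, bt709ToXyz);
  printf("first row: %.4f %.4f %.4f\n", bt709ToP3.m[0][0], bt709ToP3.m[0][1], bt709ToP3.m[0][2]);
  return 0;
}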
+ */ +Color bt709ToP3(Color e); +Color bt709ToBt2100(Color e); +Color p3ToBt709(Color e); +Color p3ToBt2100(Color e); +Color bt2100ToBt709(Color e); +Color bt2100ToP3(Color e); + +/* + * Identity conversion. + */ +inline Color identityConversion(Color e) { return e; } + +/* + * Get the conversion to apply to the HDR image for gain map generation + */ +ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, ultrahdr_color_gamut hdr_gamut); + +/* + * Convert between YUV encodings, according to ITU-R BT.709-6, ITU-R BT.601-7, and ITU-R BT.2100-2. + * + * Bt.709 and Bt.2100 have well-defined YUV encodings; Display-P3's is less well defined, but is + * treated as Bt.601 by DataSpace, hence we do the same. + */ +Color yuv709To601(Color e_gamma); +Color yuv709To2100(Color e_gamma); +Color yuv601To709(Color e_gamma); +Color yuv601To2100(Color e_gamma); +Color yuv2100To709(Color e_gamma); +Color yuv2100To601(Color e_gamma); + +/* + * Performs a transformation at the chroma x and y coordinates provided on a YUV420 image. + * + * Apply the transformation by determining transformed YUV for each of the 4 Y + 1 UV; each Y gets + * this result, and UV gets the averaged result. + * + * x_chroma and y_chroma should be less than or equal to half the image's width and height + * respecitively, since input is 4:2:0 subsampled. + */ +void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma, + ColorTransformFn fn); + +//////////////////////////////////////////////////////////////////////////////// +// Gain map calculations + +/* + * Calculate the 8-bit unsigned integer gain value for the given SDR and HDR + * luminances in linear space, and the hdr ratio to encode against. + * + * Note: since this library always uses gamma of 1.0, offsetSdr of 0.0, and + * offsetHdr of 0.0, this function doesn't handle different metadata values for + * these fields. + */ +uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata); +uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata, + float log2MinContentBoost, float log2MaxContentBoost); + +/* + * Calculates the linear luminance in nits after applying the given gain + * value, with the given hdr ratio, to the given sdr input in the range [0, 1]. + * + * Note: similar to encodeGain(), this function only supports gamma 1.0, + * offsetSdr 0.0, offsetHdr 0.0, hdrCapacityMin 1.0, and hdrCapacityMax equal to + * gainMapMax, as this library encodes. + */ +Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata); +Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost); +Color applyGainLUT(Color e, float gain, GainLUT& gainLUT); + +/* + * Helper for sampling from YUV 420 images. + */ +Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y); + +/* + * Helper for sampling from P010 images. + * + * Expect narrow-range image data for P010. + */ +Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y); + +/* + * Sample the image at the provided location, with a weighting based on nearby + * pixels and the map scale factor. + */ +Color sampleYuv420(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y); + +/* + * Sample the image at the provided location, with a weighting based on nearby + * pixels and the map scale factor. + * + * Expect narrow-range image data for P010. 
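// A simplified standalone sketch of the gain encoding described above, mirroring the
// interpolation GainLUT uses for decoding: log2 of the HDR/SDR ratio is normalized
// between log2(minContentBoost) and log2(maxContentBoost) and quantized to 8 bits.
// This is not the library's encodeGain(); names and rounding are illustrative.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

static uint8_t encodeGainSketch(float y_sdr, float y_hdr, float minBoost, float maxBoost) {
  float gain = (y_sdr > 0.f) ? y_hdr / y_sdr : 1.f;
  gain = std::min(std::max(gain, minBoost), maxBoost);
  const float v =
      (std::log2(gain) - std::log2(minBoost)) / (std::log2(maxBoost) - std::log2(minBoost));
  return static_cast<uint8_t>(v * 255.f + 0.5f);
}

int main() {
  // With content boosts [1, 8], an HDR pixel 4x brighter than its SDR counterpart
  // lands two thirds of the way up the range.
  printf("gain byte = %u\n", encodeGainSketch(0.2f, 0.8f, 1.f, 8.f));  // 170
  return 0;
}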
+ */ +Color sampleP010(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y); + +/* + * Sample the gain value for the map from a given x,y coordinate on a scale + * that is map scale factor larger than the map size. + */ +float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y); +float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y, + ShepardsIDW& weightTables); + +/* + * Convert from Color to RGBA1010102. + * + * Alpha always set to 1.0. + */ +uint32_t colorToRgba1010102(Color e_gamma); + +/* + * Convert from Color to F16. + * + * Alpha always set to 1.0. + */ +uint64_t colorToRgbaF16(Color e_gamma); + +} // namespace ultrahdr + +#endif // ULTRAHDR_GAINMAPMATH_H diff --git a/lib/include/ultrahdr/icc.h b/lib/include/ultrahdr/icc.h new file mode 100644 index 0000000..a0b4680 --- /dev/null +++ b/lib/include/ultrahdr/icc.h @@ -0,0 +1,261 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_ICC_H +#define ULTRAHDR_ICC_H + +#include + +#ifndef USE_BIG_ENDIAN_IN_ICC +#define USE_BIG_ENDIAN_IN_ICC true +#endif + +#undef Endian_SwapBE32 +#undef Endian_SwapBE16 +#if USE_BIG_ENDIAN_IN_ICC +#define Endian_SwapBE32(n) EndianSwap32(n) +#define Endian_SwapBE16(n) EndianSwap16(n) +#else +#define Endian_SwapBE32(n) (n) +#define Endian_SwapBE16(n) (n) +#endif + +#include "ultrahdr.h" +#include "jpegr.h" +#include "gainmapmath.h" +#include "jpegrutils.h" + +namespace ultrahdr { + +typedef int32_t Fixed; +#define Fixed1 (1 << 16) +#define MaxS32FitsInFloat 2147483520 +#define MinS32FitsInFloat (-MaxS32FitsInFloat) +#define FixedToFloat(x) ((x)*1.52587890625e-5f) + +typedef struct Matrix3x3 { + float vals[3][3]; +} Matrix3x3; + +// The D50 illuminant. +constexpr float kD50_x = 0.9642f; +constexpr float kD50_y = 1.0000f; +constexpr float kD50_z = 0.8249f; + +enum { + // data_color_space + Signature_CMYK = 0x434D594B, + Signature_Gray = 0x47524159, + Signature_RGB = 0x52474220, + + // pcs + Signature_Lab = 0x4C616220, + Signature_XYZ = 0x58595A20, +}; + +typedef uint32_t FourByteTag; +static inline constexpr FourByteTag SetFourByteTag(char a, char b, char c, char d) { + return (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d); +} + +static constexpr char kICCIdentifier[] = "ICC_PROFILE"; +// 12 for the actual identifier, +2 for the chunk count and chunk index which +// will always follow. +static constexpr size_t kICCIdentifierSize = 14; + +// This is equal to the header size according to the ICC specification (128) +// plus the size of the tag count (4). We include the tag count since we +// always require it to be present anyway. +static constexpr size_t kICCHeaderSize = 132; + +// Contains a signature (4), offset (4), and size (4). +static constexpr size_t kICCTagTableEntrySize = 12; + +// size should be 20; 4 bytes for type descriptor, 4 bytes reserved, 12 +// bytes for a single XYZ number type (4 bytes per coordinate). 
+static constexpr size_t kColorantTagSize = 20; + +static constexpr uint32_t kDisplay_Profile = SetFourByteTag('m', 'n', 't', 'r'); +static constexpr uint32_t kRGB_ColorSpace = SetFourByteTag('R', 'G', 'B', ' '); +static constexpr uint32_t kXYZ_PCSSpace = SetFourByteTag('X', 'Y', 'Z', ' '); +static constexpr uint32_t kACSP_Signature = SetFourByteTag('a', 'c', 's', 'p'); + +static constexpr uint32_t kTAG_desc = SetFourByteTag('d', 'e', 's', 'c'); +static constexpr uint32_t kTAG_TextType = SetFourByteTag('m', 'l', 'u', 'c'); +static constexpr uint32_t kTAG_rXYZ = SetFourByteTag('r', 'X', 'Y', 'Z'); +static constexpr uint32_t kTAG_gXYZ = SetFourByteTag('g', 'X', 'Y', 'Z'); +static constexpr uint32_t kTAG_bXYZ = SetFourByteTag('b', 'X', 'Y', 'Z'); +static constexpr uint32_t kTAG_wtpt = SetFourByteTag('w', 't', 'p', 't'); +static constexpr uint32_t kTAG_rTRC = SetFourByteTag('r', 'T', 'R', 'C'); +static constexpr uint32_t kTAG_gTRC = SetFourByteTag('g', 'T', 'R', 'C'); +static constexpr uint32_t kTAG_bTRC = SetFourByteTag('b', 'T', 'R', 'C'); +static constexpr uint32_t kTAG_cicp = SetFourByteTag('c', 'i', 'c', 'p'); +static constexpr uint32_t kTAG_cprt = SetFourByteTag('c', 'p', 'r', 't'); +static constexpr uint32_t kTAG_A2B0 = SetFourByteTag('A', '2', 'B', '0'); +static constexpr uint32_t kTAG_B2A0 = SetFourByteTag('B', '2', 'A', '0'); + +static constexpr uint32_t kTAG_CurveType = SetFourByteTag('c', 'u', 'r', 'v'); +static constexpr uint32_t kTAG_mABType = SetFourByteTag('m', 'A', 'B', ' '); +static constexpr uint32_t kTAG_mBAType = SetFourByteTag('m', 'B', 'A', ' '); +static constexpr uint32_t kTAG_ParaCurveType = SetFourByteTag('p', 'a', 'r', 'a'); + +static constexpr Matrix3x3 kSRGB = {{ + // ICC fixed-point (16.16) representation, taken from skcms. Please keep them exactly in sync. + // 0.436065674f, 0.385147095f, 0.143066406f, + // 0.222488403f, 0.716873169f, 0.060607910f, + // 0.013916016f, 0.097076416f, 0.714096069f, + {FixedToFloat(0x6FA2), FixedToFloat(0x6299), FixedToFloat(0x24A0)}, + {FixedToFloat(0x38F5), FixedToFloat(0xB785), FixedToFloat(0x0F84)}, + {FixedToFloat(0x0390), FixedToFloat(0x18DA), FixedToFloat(0xB6CF)}, +}}; + +static constexpr Matrix3x3 kDisplayP3 = {{ + {0.515102f, 0.291965f, 0.157153f}, + {0.241182f, 0.692236f, 0.0665819f}, + {-0.00104941f, 0.0418818f, 0.784378f}, +}}; + +static constexpr Matrix3x3 kRec2020 = {{ + {0.673459f, 0.165661f, 0.125100f}, + {0.279033f, 0.675338f, 0.0456288f}, + {-0.00193139f, 0.0299794f, 0.797162f}, +}}; + +static constexpr uint32_t kCICPPrimariesSRGB = 1; +static constexpr uint32_t kCICPPrimariesP3 = 12; +static constexpr uint32_t kCICPPrimariesRec2020 = 9; + +static constexpr uint32_t kCICPTrfnSRGB = 1; +static constexpr uint32_t kCICPTrfnLinear = 8; +static constexpr uint32_t kCICPTrfnPQ = 16; +static constexpr uint32_t kCICPTrfnHLG = 18; + +enum ParaCurveType { + kExponential_ParaCurveType = 0, + kGAB_ParaCurveType = 1, + kGABC_ParaCurveType = 2, + kGABDE_ParaCurveType = 3, + kGABCDEF_ParaCurveType = 4, +}; + +/** + * Return the closest int for the given float. Returns MaxS32FitsInFloat for NaN. + */ +static inline int float_saturate2int(float x) { + x = x < MaxS32FitsInFloat ? x : MaxS32FitsInFloat; + x = x > MinS32FitsInFloat ? 
x : MinS32FitsInFloat; + return (int)x; +} + +static Fixed float_round_to_fixed(float x) { + return float_saturate2int((float)floor((double)x * Fixed1 + 0.5)); +} + +static uint16_t float_round_to_unorm16(float x) { + x = x * 65535.f + 0.5; + if (x > 65535) return 65535; + if (x < 0) return 0; + return static_cast(x); +} + +static inline void float_to_table16(const float f, uint8_t* table_16) { + *reinterpret_cast(table_16) = Endian_SwapBE16(float_round_to_unorm16(f)); +} + +static inline bool isfinitef_(float x) { return 0 == x * 0; } + +struct ICCHeader { + // Size of the profile (computed) + uint32_t size; + // Preferred CMM type (ignored) + uint32_t cmm_type = 0; + // Version 4.3 or 4.4 if CICP is included. + uint32_t version = Endian_SwapBE32(0x04300000); + // Display device profile + uint32_t profile_class = Endian_SwapBE32(kDisplay_Profile); + // RGB input color space; + uint32_t data_color_space = Endian_SwapBE32(kRGB_ColorSpace); + // Profile connection space. + uint32_t pcs = Endian_SwapBE32(kXYZ_PCSSpace); + // Date and time (ignored) + uint8_t creation_date_time[12] = {0}; + // Profile signature + uint32_t signature = Endian_SwapBE32(kACSP_Signature); + // Platform target (ignored) + uint32_t platform = 0; + // Flags: not embedded, can be used independently + uint32_t flags = 0x00000000; + // Device manufacturer (ignored) + uint32_t device_manufacturer = 0; + // Device model (ignored) + uint32_t device_model = 0; + // Device attributes (ignored) + uint8_t device_attributes[8] = {0}; + // Relative colorimetric rendering intent + uint32_t rendering_intent = Endian_SwapBE32(1); + // D50 standard illuminant (X, Y, Z) + uint32_t illuminant_X = Endian_SwapBE32(float_round_to_fixed(kD50_x)); + uint32_t illuminant_Y = Endian_SwapBE32(float_round_to_fixed(kD50_y)); + uint32_t illuminant_Z = Endian_SwapBE32(float_round_to_fixed(kD50_z)); + // Profile creator (ignored) + uint32_t creator = 0; + // Profile id checksum (ignored) + uint8_t profile_id[16] = {0}; + // Reserved (ignored) + uint8_t reserved[28] = {0}; + // Technically not part of header, but required + uint32_t tag_count = 0; +}; + +class IccHelper { + private: + static constexpr uint32_t kTrcTableSize = 65; + static constexpr uint32_t kGridSize = 17; + static constexpr size_t kNumChannels = 3; + + static std::shared_ptr write_text_tag(const char* text); + static std::string get_desc_string(const ultrahdr_transfer_function tf, + const ultrahdr_color_gamut gamut); + static std::shared_ptr write_xyz_tag(float x, float y, float z); + static std::shared_ptr write_trc_tag(const int table_entries, const void* table_16); + static std::shared_ptr write_trc_tag(const TransferFunction& fn); + static float compute_tone_map_gain(const ultrahdr_transfer_function tf, float L); + static std::shared_ptr write_cicp_tag(uint32_t color_primaries, + uint32_t transfer_characteristics); + static std::shared_ptr write_mAB_or_mBA_tag(uint32_t type, bool has_a_curves, + const uint8_t* grid_points, + const uint8_t* grid_16); + static void compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]); + static std::shared_ptr write_clut(const uint8_t* grid_points, const uint8_t* grid_16); + + // Checks if a set of xyz tags is equivalent to a 3x3 Matrix. Each input + // tag buffer assumed to be at least kColorantTagSize in size. 
+ static bool tagsEqualToMatrix(const Matrix3x3& matrix, const uint8_t* red_tag, + const uint8_t* green_tag, const uint8_t* blue_tag); + + public: + // Output includes JPEG embedding identifier and chunk information, but not + // APPx information. + static std::shared_ptr writeIccProfile(const ultrahdr_transfer_function tf, + const ultrahdr_color_gamut gamut); + // NOTE: this function is not robust; it can infer gamuts that IccHelper + // writes out but should not be considered a reference implementation for + // robust parsing of ICC profiles or their gamuts. + static ultrahdr_color_gamut readIccColorGamut(void* icc_data, size_t icc_size); +}; +} // namespace ultrahdr + +#endif // ULTRAHDR_ICC_H diff --git a/lib/include/ultrahdr/jpegdecoderhelper.h b/lib/include/ultrahdr/jpegdecoderhelper.h new file mode 100644 index 0000000..01a05e4 --- /dev/null +++ b/lib/include/ultrahdr/jpegdecoderhelper.h @@ -0,0 +1,154 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_JPEGDECODERHELPER_H +#define ULTRAHDR_JPEGDECODERHELPER_H + +#include // For jpeglib.h. + +// C++ build requires extern C for jpeg internals. +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#ifdef __cplusplus +} // extern "C" +#endif + +#include +#include +#include + +// constraint on max width and max height is only due to device alloc constraints +// Can tune these values basing on the target device +static const int kMaxWidth = 8192; +static const int kMaxHeight = 8192; + +namespace ultrahdr { +/* + * Encapsulates a converter from JPEG to raw image (YUV420planer or grey-scale) format. + * This class is not thread-safe. + */ +class JpegDecoderHelper { + public: + JpegDecoderHelper(); + ~JpegDecoderHelper(); + /* + * Decompresses JPEG image to raw image (YUV420planer, grey-scale or RGBA) format. After + * calling this method, call getDecompressedImage() to get the image. + * Returns false if decompressing the image fails. + */ + bool decompressImage(const void* image, int length, bool decodeToRGBA = false); + /* + * Returns the decompressed raw image buffer pointer. This method must be called only after + * calling decompressImage(). + */ + void* getDecompressedImagePtr(); + /* + * Returns the decompressed raw image buffer size. This method must be called only after + * calling decompressImage(). + */ + size_t getDecompressedImageSize(); + /* + * Returns the image width in pixels. This method must be called only after calling + * decompressImage(). + */ + size_t getDecompressedImageWidth(); + /* + * Returns the image width in pixels. This method must be called only after calling + * decompressImage(). + */ + size_t getDecompressedImageHeight(); + /* + * Returns the XMP data from the image. + */ + void* getXMPPtr(); + /* + * Returns the decompressed XMP buffer size. This method must be called only after + * calling decompressImage() or getCompressedImageParameters(). 
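// A usage sketch of JpegDecoderHelper as declared above; the calling function and its
// error handling are illustrative, and the include path assumes the new
// lib/include/ultrahdr layout.
#include <cstdint>
#include <cstdio>
#include <vector>
#include "ultrahdr/jpegdecoderhelper.h"

bool decodeToYuv(const std::vector<uint8_t>& jpeg) {
  ultrahdr::JpegDecoderHelper decoder;
  if (!decoder.decompressImage(jpeg.data(), static_cast<int>(jpeg.size()))) {
    return false;  // YUV420 planar or grayscale output on success
  }
  printf("decoded %zux%zu, %zu bytes, icc=%zu exif=%zu\n",
         decoder.getDecompressedImageWidth(), decoder.getDecompressedImageHeight(),
         decoder.getDecompressedImageSize(), decoder.getICCSize(), decoder.getEXIFSize());
  return true;
}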
+ */ + size_t getXMPSize(); + /* + * Extracts EXIF package and updates the EXIF position / length without decoding the image. + */ + bool extractEXIF(const void* image, int length); + /* + * Returns the EXIF data from the image. + * This method must be called after extractEXIF() or decompressImage(). + */ + void* getEXIFPtr(); + /* + * Returns the decompressed EXIF buffer size. This method must be called only after + * calling decompressImage(), extractEXIF() or getCompressedImageParameters(). + */ + size_t getEXIFSize(); + /* + * Returns the position offset of EXIF package + * (4 bypes offset to FF sign, the byte after FF E1 XX XX ), + * or -1 if no EXIF exists. + * This method must be called after extractEXIF() or decompressImage(). + */ + int getEXIFPos() { return mExifPos; } + /* + * Returns the ICC data from the image. + */ + void* getICCPtr(); + /* + * Returns the decompressed ICC buffer size. This method must be called only after + * calling decompressImage() or getCompressedImageParameters(). + */ + size_t getICCSize(); + /* + * Decompresses metadata of the image. All vectors are owned by the caller. + */ + bool getCompressedImageParameters(const void* image, int length, size_t* pWidth, size_t* pHeight, + std::vector* iccData, std::vector* exifData); + + private: + bool decode(const void* image, int length, bool decodeToRGBA); + // Returns false if errors occur. + bool decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest, bool isSingleChannel); + bool decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest); + bool decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest); + bool decompressSingleChannel(jpeg_decompress_struct* cinfo, const uint8_t* dest); + // Process 16 lines of Y and 16 lines of U/V each time. + // We must pass at least 16 scanlines according to libjpeg documentation. + static const int kCompressBatchSize = 16; + // The buffer that holds the decompressed result. + std::vector mResultBuffer; + // The buffer that holds XMP Data. + std::vector mXMPBuffer; + // The buffer that holds EXIF Data. + std::vector mEXIFBuffer; + // The buffer that holds ICC Data. + std::vector mICCBuffer; + + // Resolution of the decompressed image. + size_t mWidth; + size_t mHeight; + + // Position of EXIF package, default value is -1 which means no EXIF package appears. + int mExifPos = -1; + + std::unique_ptr mEmpty = nullptr; + std::unique_ptr mBufferIntermediate = nullptr; +}; +} /* namespace ultrahdr */ + +#endif // ULTRAHDR_JPEGDECODERHELPER_H diff --git a/lib/include/ultrahdr/jpegencoderhelper.h b/lib/include/ultrahdr/jpegencoderhelper.h new file mode 100644 index 0000000..e988578 --- /dev/null +++ b/lib/include/ultrahdr/jpegencoderhelper.h @@ -0,0 +1,106 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_JPEGENCODERHELPER_H +#define ULTRAHDR_JPEGENCODERHELPER_H + +#include // For jpeglib.h. + +// C++ build requires extern C for jpeg internals. 
+#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#ifdef __cplusplus +} // extern "C" +#endif + +#include +#include + +namespace ultrahdr { + +/* + * Encapsulates a converter from raw image (YUV420planer or grey-scale) to JPEG format. + * This class is not thread-safe. + */ +class JpegEncoderHelper { + public: + JpegEncoderHelper(); + ~JpegEncoderHelper(); + + /* + * Compresses YUV420Planer image to JPEG format. After calling this method, call + * getCompressedImage() to get the image. |quality| is the jpeg image quality parameter to use. + * It ranges from 1 (poorest quality) to 100 (highest quality). |iccBuffer| is the buffer of + * ICC segment which will be added to the compressed image. + * Returns false if errors occur during compression. + */ + bool compressImage(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, int height, + int lumaStride, int chromaStride, int quality, const void* iccBuffer, + unsigned int iccSize); + + /* + * Returns the compressed JPEG buffer pointer. This method must be called only after calling + * compressImage(). + */ + void* getCompressedImagePtr(); + + /* + * Returns the compressed JPEG buffer size. This method must be called only after calling + * compressImage(). + */ + size_t getCompressedImageSize(); + + /* + * Process 16 lines of Y and 16 lines of U/V each time. + * We must pass at least 16 scanlines according to libjpeg documentation. + */ + static const int kCompressBatchSize = 16; + + private: + // initDestination(), emptyOutputBuffer() and emptyOutputBuffer() are callback functions to be + // passed into jpeg library. + static void initDestination(j_compress_ptr cinfo); + static boolean emptyOutputBuffer(j_compress_ptr cinfo); + static void terminateDestination(j_compress_ptr cinfo); + static void outputErrorMessage(j_common_ptr cinfo); + + // Returns false if errors occur. + bool encode(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, int height, + int lumaStride, int chromaStride, int quality, const void* iccBuffer, + unsigned int iccSize); + void setJpegDestination(jpeg_compress_struct* cinfo); + void setJpegCompressStruct(int width, int height, int quality, jpeg_compress_struct* cinfo, + bool isSingleChannel); + // Returns false if errors occur. + bool compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, const uint8_t* uvBuffer, + int lumaStride, int chromaStride); + bool compressY(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, int lumaStride); + + // The block size for encoded jpeg image buffer. + static const int kBlockSize = 16384; + + // The buffer that holds the compressed result. + std::vector mResultBuffer; +}; + +} /* namespace ultrahdr */ + +#endif // ULTRAHDR_JPEGENCODERHELPER_H diff --git a/lib/include/ultrahdr/jpegr.h b/lib/include/ultrahdr/jpegr.h new file mode 100644 index 0000000..8ac227d --- /dev/null +++ b/lib/include/ultrahdr/jpegr.h @@ -0,0 +1,464 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
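// A usage sketch of JpegEncoderHelper::compressImage() as declared above; the I420
// buffer layout (chroma planes packed right after luma) and the quality value are
// illustrative assumptions.
#include <cstddef>
#include <cstdint>
#include "ultrahdr/jpegencoderhelper.h"

bool encodeYuv420(const uint8_t* yuv, int width, int height) {
  const uint8_t* yPlane = yuv;
  const uint8_t* uvPlanes = yuv + static_cast<size_t>(width) * height;  // U then V, each w/2 x h/2
  ultrahdr::JpegEncoderHelper encoder;
  if (!encoder.compressImage(yPlane, uvPlanes, width, height, /*lumaStride=*/width,
                             /*chromaStride=*/width / 2, /*quality=*/95,
                             /*iccBuffer=*/nullptr, /*iccSize=*/0)) {
    return false;
  }
  return encoder.getCompressedImageSize() > 0;  // bitstream available via getCompressedImagePtr()
}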
+ */ + +#ifndef ULTRAHDR_JPEGR_H +#define ULTRAHDR_JPEGR_H + +#include + +#include "ultrahdr/ultrahdr.h" +#include "ultrahdr/jpegdecoderhelper.h" +#include "ultrahdr/jpegencoderhelper.h" + +namespace ultrahdr { + +// The current JPEGR version that we encode to +static const char* const kJpegrVersion = "1.0"; + +// Map is quarter res / sixteenth size +static const size_t kMapDimensionScaleFactor = 4; + +// Gain Map width is (image_width / kMapDimensionScaleFactor). If we were to +// compress 420 GainMap in jpeg, then we need at least 2 samples. For Grayscale +// 1 sample is sufficient. We are using 2 here anyways +static const int kMinWidth = 2 * kMapDimensionScaleFactor; +static const int kMinHeight = 2 * kMapDimensionScaleFactor; + +typedef enum { + JPEGR_NO_ERROR = 0, + JPEGR_UNKNOWN_ERROR = -1, + + JPEGR_IO_ERROR_BASE = -10000, + ERROR_JPEGR_BAD_PTR = JPEGR_IO_ERROR_BASE - 1, + ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT = JPEGR_IO_ERROR_BASE - 2, + ERROR_JPEGR_INVALID_COLORGAMUT = JPEGR_IO_ERROR_BASE - 3, + ERROR_JPEGR_INVALID_STRIDE = JPEGR_IO_ERROR_BASE - 4, + ERROR_JPEGR_INVALID_TRANS_FUNC = JPEGR_IO_ERROR_BASE - 5, + ERROR_JPEGR_RESOLUTION_MISMATCH = JPEGR_IO_ERROR_BASE - 6, + ERROR_JPEGR_INVALID_QUALITY_FACTOR = JPEGR_IO_ERROR_BASE - 7, + ERROR_JPEGR_INVALID_DISPLAY_BOOST = JPEGR_IO_ERROR_BASE - 8, + ERROR_JPEGR_INVALID_OUTPUT_FORMAT = JPEGR_IO_ERROR_BASE - 9, + ERROR_JPEGR_BAD_METADATA = JPEGR_IO_ERROR_BASE - 10, + + JPEGR_RUNTIME_ERROR_BASE = -20000, + ERROR_JPEGR_ENCODE_ERROR = JPEGR_RUNTIME_ERROR_BASE - 1, + ERROR_JPEGR_DECODE_ERROR = JPEGR_RUNTIME_ERROR_BASE - 2, + ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND = JPEGR_RUNTIME_ERROR_BASE - 3, + ERROR_JPEGR_BUFFER_TOO_SMALL = JPEGR_RUNTIME_ERROR_BASE - 4, + ERROR_JPEGR_METADATA_ERROR = JPEGR_RUNTIME_ERROR_BASE - 5, + ERROR_JPEGR_NO_IMAGES_FOUND = JPEGR_RUNTIME_ERROR_BASE - 6, + ERROR_JPEGR_MULTIPLE_EXIFS_RECEIVED = JPEGR_RUNTIME_ERROR_BASE - 7, + + ERROR_JPEGR_UNSUPPORTED_FEATURE = -30000, +} status_t; + +/* + * Holds information of jpegr image + */ +struct jpegr_info_struct { + size_t width; + size_t height; + std::vector* iccData; + std::vector* exifData; +}; + +/* + * Holds information for uncompressed image or gain map. + */ +struct jpegr_uncompressed_struct { + // Pointer to the data location. + void* data; + // Width of the gain map or the luma plane of the image in pixels. + size_t width; + // Height of the gain map or the luma plane of the image in pixels. + size_t height; + // Color gamut. + ultrahdr_color_gamut colorGamut; + + // Values below are optional + // Pointer to chroma data, if it's NULL, chroma plane is considered to be immediately + // after the luma plane. + void* chroma_data = nullptr; + // Stride of Y plane in number of pixels. 0 indicates the member is uninitialized. If + // non-zero this value must be larger than or equal to luma width. If stride is + // uninitialized then it is assumed to be equal to luma width. + size_t luma_stride = 0; + // Stride of UV plane in number of pixels. + // 1. If this handle points to P010 image then this value must be larger than + // or equal to luma width. + // 2. If this handle points to 420 image then this value must be larger than + // or equal to (luma width / 2). + // NOTE: if chroma_data is nullptr, chroma_stride is irrelevant. Just as the way, + // chroma_data is derived from luma ptr, chroma stride is derived from luma stride. + size_t chroma_stride = 0; +}; + +/* + * Holds information for compressed image or gain map. 
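A sketch of how the stride rules above play out for a tightly packed P010 input; the dimensions are hypothetical, and the gain-map size follows from kMapDimensionScaleFactor = 4:

    // Illustrative sketch, not part of the diff: describing a packed P010 buffer.
    #include "ultrahdr/jpegr.h"

    ultrahdr::jpegr_uncompressed_struct describeP010Sketch(void* p010) {
      ultrahdr::jpegr_uncompressed_struct hdr{};
      hdr.data = p010;                 // 16-bit Y plane, then interleaved 16-bit UV
      hdr.width = 4080;                // hypothetical dimensions
      hdr.height = 3072;
      hdr.colorGamut = ultrahdr::ULTRAHDR_COLORGAMUT_BT2100;
      hdr.chroma_data = nullptr;       // UV assumed to follow the Y plane immediately
      hdr.luma_stride = 0;             // 0 => stride taken as equal to width
      hdr.chroma_stride = 0;           // irrelevant while chroma_data is nullptr
      // With kMapDimensionScaleFactor = 4 the gain map would be about 1020 x 768.
      return hdr;
    }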
+ */ +struct jpegr_compressed_struct { + // Pointer to the data location. + void* data; + // Used data length in bytes. + int length; + // Maximum available data length in bytes. + int maxLength; + // Color gamut. + ultrahdr_color_gamut colorGamut; +}; + +/* + * Holds information for EXIF metadata. + */ +struct jpegr_exif_struct { + // Pointer to the data location. + void* data; + // Data length; + size_t length; +}; + +typedef struct jpegr_uncompressed_struct* jr_uncompressed_ptr; +typedef struct jpegr_compressed_struct* jr_compressed_ptr; +typedef struct jpegr_exif_struct* jr_exif_ptr; +typedef struct jpegr_info_struct* jr_info_ptr; + +class JpegR { + public: + /* + * Experimental only + * + * Encode API-0 + * Compress JPEGR image from 10-bit HDR YUV. + * + * Tonemap the HDR input to a SDR image, generate gain map from the HDR and SDR images, + * compress SDR YUV to 8-bit JPEG and append the gain map to the end of the compressed + * JPEG. + * @param p010_image_ptr uncompressed HDR image in P010 color format + * @param hdr_tf transfer function of the HDR image + * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} + * represents the maximum available size of the destination buffer, and it must be + * set before calling this method. If the encoded JPEGR size exceeds + * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. + * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is + * the highest quality + * @param exif pointer to the exif metadata. + * @return NO_ERROR if encoding succeeds, error code if error occurs. + */ + status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, ultrahdr_transfer_function hdr_tf, + jr_compressed_ptr dest, int quality, jr_exif_ptr exif); + + /* + * Encode API-1 + * Compress JPEGR image from 10-bit HDR YUV and 8-bit SDR YUV. + * + * Generate gain map from the HDR and SDR inputs, compress SDR YUV to 8-bit JPEG and append + * the gain map to the end of the compressed JPEG. HDR and SDR inputs must be the same + * resolution. SDR input is assumed to use the sRGB transfer function. + * @param p010_image_ptr uncompressed HDR image in P010 color format + * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format + * @param hdr_tf transfer function of the HDR image + * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} + * represents the maximum available size of the desitination buffer, and it must be + * set before calling this method. If the encoded JPEGR size exceeds + * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. + * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is + * the highest quality + * @param exif pointer to the exif metadata. + * @return NO_ERROR if encoding succeeds, error code if error occurs. + */ + status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_uncompressed_ptr yuv420_image_ptr, + ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest, int quality, + jr_exif_ptr exif); + + /* + * Encode API-2 + * Compress JPEGR image from 10-bit HDR YUV, 8-bit SDR YUV and compressed 8-bit JPEG. + * + * This method requires HAL Hardware JPEG encoder. + * + * Generate gain map from the HDR and SDR inputs, append the gain map to the end of the + * compressed JPEG. Adds an ICC profile if one isn't present in the input JPEG image. HDR and + * SDR inputs must be the same resolution and color space. 
SDR image is assumed to use the sRGB + * transfer function. + * @param p010_image_ptr uncompressed HDR image in P010 color format + * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format + * @param yuv420jpg_image_ptr SDR image compressed in jpeg format + * @param hdr_tf transfer function of the HDR image + * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} + * represents the maximum available size of the desitination buffer, and it must be + * set before calling this method. If the encoded JPEGR size exceeds + * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. + * @return NO_ERROR if encoding succeeds, error code if error occurs. + */ + status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_uncompressed_ptr yuv420_image_ptr, + jr_compressed_ptr yuv420jpg_image_ptr, ultrahdr_transfer_function hdr_tf, + jr_compressed_ptr dest); + + /* + * Encode API-3 + * Compress JPEGR image from 10-bit HDR YUV and 8-bit SDR YUV. + * + * This method requires HAL Hardware JPEG encoder. + * + * Decode the compressed 8-bit JPEG image to YUV SDR, generate gain map from the HDR input + * and the decoded SDR result, append the gain map to the end of the compressed JPEG. Adds an + * ICC profile if one isn't present in the input JPEG image. HDR and SDR inputs must be the same + * resolution. JPEG image is assumed to use the sRGB transfer function. + * @param p010_image_ptr uncompressed HDR image in P010 color format + * @param yuv420jpg_image_ptr SDR image compressed in jpeg format + * @param hdr_tf transfer function of the HDR image + * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} + * represents the maximum available size of the desitination buffer, and it must be + * set before calling this method. If the encoded JPEGR size exceeds + * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. + * @return NO_ERROR if encoding succeeds, error code if error occurs. + */ + status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_compressed_ptr yuv420jpg_image_ptr, + ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest); + + /* + * Encode API-4 + * Assemble JPEGR image from SDR JPEG and gainmap JPEG. + * + * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format. Adds an ICC + * profile if one isn't present in the input JPEG image. + * @param yuv420jpg_image_ptr SDR image compressed in jpeg format + * @param gainmapjpg_image_ptr gain map image compressed in jpeg format + * @param metadata metadata to be written in XMP of the primary jpeg + * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} + * represents the maximum available size of the desitination buffer, and it must be + * set before calling this method. If the encoded JPEGR size exceeds + * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. + * @return NO_ERROR if encoding succeeds, error code if error occurs. + */ + status_t encodeJPEGR(jr_compressed_ptr yuv420jpg_image_ptr, + jr_compressed_ptr gainmapjpg_image_ptr, ultrahdr_metadata_ptr metadata, + jr_compressed_ptr dest); + + /* + * Decode API + * Decompress JPEGR image. + * + * This method assumes that the JPEGR image contains an ICC profile with primaries that match + * those of a color gamut that this library is aware of; Bt.709, Display-P3, or Bt.2100. It also + * assumes the base image uses the sRGB transfer function. 
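A sketch of calling the simplest overload (API-0) under the maxLength rules spelled out above; the sizing heuristic, quality value and helper name are assumptions, not part of the change:

    // Illustrative sketch, not part of the diff: API-0 encode into a caller-sized destination.
    #include <vector>
    #include "ultrahdr/jpegr.h"

    ultrahdr::status_t encodeHdrSketch(ultrahdr::jr_uncompressed_ptr p010Image,
                                       std::vector<uint8_t>& out) {
      out.resize(p010Image->width * p010Image->height * 3 / 2);  // generous size guess
      ultrahdr::jpegr_compressed_struct dest{};
      dest.data = out.data();
      dest.maxLength = static_cast<int>(out.size());             // must be set before encoding
      ultrahdr::JpegR jpegHdr;
      ultrahdr::status_t err =
          jpegHdr.encodeJPEGR(p010Image, ultrahdr::ULTRAHDR_TF_HLG, &dest, /*quality=*/95,
                              /*exif=*/nullptr);
      if (err == ultrahdr::JPEGR_NO_ERROR) {
        out.resize(dest.length);  // length is assumed to hold the bytes actually written
      }
      return err;
    }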
+ * + * This method only supports single gain map metadata values for fields that allow multi-channel + * metadata values. + * @param jpegr_image_ptr compressed JPEGR image. + * @param dest destination of the uncompressed JPEGR image. + * @param max_display_boost (optional) the maximum available boost supported by a display, + * the value must be greater than or equal to 1.0. + * @param exif destination of the decoded EXIF metadata. The default value is NULL where the + decoder will do nothing about it. If configured not NULL the decoder will write + EXIF data into this structure. The format is defined in {@code jpegr_exif_struct} + * @param output_format flag for setting output color format. Its value configures the output + color format. The default value is {@code JPEGR_OUTPUT_HDR_LINEAR}. + ---------------------------------------------------------------------- + | output_format | decoded color format to be written | + ---------------------------------------------------------------------- + | JPEGR_OUTPUT_SDR | RGBA_8888 | + ---------------------------------------------------------------------- + | JPEGR_OUTPUT_HDR_LINEAR | (default)RGBA_F16 linear | + ---------------------------------------------------------------------- + | JPEGR_OUTPUT_HDR_PQ | RGBA_1010102 PQ | + ---------------------------------------------------------------------- + | JPEGR_OUTPUT_HDR_HLG | RGBA_1010102 HLG | + ---------------------------------------------------------------------- + * @param gainmap_image_ptr destination of the decoded gain map. The default value is NULL + where the decoder will do nothing about it. If configured not NULL + the decoder will write the decoded gain_map data into this + structure. The format is defined in + {@code jpegr_uncompressed_struct}. + * @param metadata destination of the decoded metadata. The default value is NULL where the + decoder will do nothing about it. If configured not NULL the decoder will + write metadata into this structure. the format of metadata is defined in + {@code ultrahdr_metadata_struct}. + * @return NO_ERROR if decoding succeeds, error code if error occurs. + */ + status_t decodeJPEGR(jr_compressed_ptr jpegr_image_ptr, jr_uncompressed_ptr dest, + float max_display_boost = FLT_MAX, jr_exif_ptr exif = nullptr, + ultrahdr_output_format output_format = ULTRAHDR_OUTPUT_HDR_LINEAR, + jr_uncompressed_ptr gainmap_image_ptr = nullptr, + ultrahdr_metadata_ptr metadata = nullptr); + + /* + * Gets Info from JPEGR file without decoding it. + * + * This method only supports single gain map metadata values for fields that allow multi-channel + * metadata values. + * + * The output is filled jpegr_info structure + * @param jpegr_image_ptr compressed JPEGR image + * @param jpeg_image_info_ptr pointer to jpegr info struct. Members of jpegr_info + * are owned by the caller + * @return NO_ERROR if JPEGR parsing succeeds, error code otherwise + */ + status_t getJPEGRInfo(jr_compressed_ptr jpegr_image_ptr, jr_info_ptr jpeg_image_info_ptr); + + protected: + /* + * This method is called in the encoding pipeline. It will take the uncompressed 8-bit and + * 10-bit yuv images as input, and calculate the uncompressed gain map. The input images + * must be the same resolution. The SDR input is assumed to use the sRGB transfer function. 
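And the decode direction, following the defaults documented above (linear RGBA_F16 output, 8 bytes per pixel); the caller-allocated output buffer and the uint8_t element type of the info vectors are assumptions:

    // Illustrative sketch, not part of the diff: probe a JPEG/R file, then decode it.
    #include <vector>
    #include "ultrahdr/jpegr.h"

    ultrahdr::status_t decodeHdrSketch(ultrahdr::jr_compressed_ptr jpegrImage) {
      ultrahdr::JpegR jpegHdr;
      std::vector<uint8_t> icc, exif;          // element type assumed
      ultrahdr::jpegr_info_struct info{};
      info.iccData = &icc;
      info.exifData = &exif;
      ultrahdr::status_t err = jpegHdr.getJPEGRInfo(jpegrImage, &info);
      if (err != ultrahdr::JPEGR_NO_ERROR) return err;

      std::vector<uint8_t> pixels(info.width * info.height * 8);  // RGBA_F16
      ultrahdr::jpegr_uncompressed_struct dest{};
      dest.data = pixels.data();
      return jpegHdr.decodeJPEGR(jpegrImage, &dest);  // defaults: FLT_MAX boost, HDR linear out
    }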
+ * + * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format + * @param p010_image_ptr uncompressed HDR image in P010 color format + * @param hdr_tf transfer function of the HDR image + * @param metadata everything but "version" is filled in this struct + * @param dest location at which gain map image is stored (caller responsible for memory + of data). + * @param sdr_is_601 if true, then use BT.601 decoding of YUV regardless of SDR image gamut + * @return NO_ERROR if calculation succeeds, error code if error occurs. + */ + status_t generateGainMap(jr_uncompressed_ptr yuv420_image_ptr, jr_uncompressed_ptr p010_image_ptr, + ultrahdr_transfer_function hdr_tf, ultrahdr_metadata_ptr metadata, + jr_uncompressed_ptr dest, bool sdr_is_601 = false); + + /* + * This method is called in the decoding pipeline. It will take the uncompressed (decoded) + * 8-bit yuv image, the uncompressed (decoded) gain map, and extracted JPEG/R metadata as + * input, and calculate the 10-bit recovered image. The recovered output image is the same + * color gamut as the SDR image, with HLG transfer function, and is in RGBA1010102 data format. + * The SDR image is assumed to use the sRGB transfer function. The SDR image is also assumed to + * be a decoded JPEG for the purpose of YUV interpration. + * + * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format + * @param gainmap_image_ptr pointer to uncompressed gain map image struct. + * @param metadata JPEG/R metadata extracted from XMP. + * @param output_format flag for setting output color format. if set to + * {@code JPEGR_OUTPUT_SDR}, decoder will only decode the primary image + * which is SDR. Default value is JPEGR_OUTPUT_HDR_LINEAR. + * @param max_display_boost the maximum available boost supported by a display + * @param dest reconstructed HDR image + * @return NO_ERROR if calculation succeeds, error code if error occurs. + */ + status_t applyGainMap(jr_uncompressed_ptr yuv420_image_ptr, jr_uncompressed_ptr gainmap_image_ptr, + ultrahdr_metadata_ptr metadata, ultrahdr_output_format output_format, + float max_display_boost, jr_uncompressed_ptr dest); + + private: + /* + * This method is called in the encoding pipeline. It will encode the gain map. + * + * @param gainmap_image_ptr pointer to uncompressed gain map image struct + * @param jpeg_enc_obj_ptr helper resource to compress gain map + * @return NO_ERROR if encoding succeeds, error code if error occurs. + */ + status_t compressGainMap(jr_uncompressed_ptr gainmap_image_ptr, + JpegEncoderHelper* jpeg_enc_obj_ptr); + + /* + * This method is called to separate primary image and gain map image from JPEGR + * + * @param jpegr_image_ptr pointer to compressed JPEGR image. + * @param primary_jpg_image_ptr destination of primary image + * @param gainmap_jpg_image_ptr destination of compressed gain map image + * @return NO_ERROR if calculation succeeds, error code if error occurs. + */ + status_t extractPrimaryImageAndGainMap(jr_compressed_ptr jpegr_image_ptr, + jr_compressed_ptr primary_jpg_image_ptr, + jr_compressed_ptr gainmap_jpg_image_ptr); + + /* + * This method is called in the encoding pipeline. 
It will take the standard 8-bit JPEG image, + * the compressed gain map and optionally the exif package as inputs, and generate the XMP + * metadata, and finally append everything in the order of: + * SOI, APP2(EXIF) (if EXIF is from outside), APP2(XMP), primary image, gain map + * + * Note that in the final JPEG/R output, EXIF package will appear if ONLY ONE of the following + * conditions is fulfilled: + * (1) EXIF package is available from outside input. I.e. pExif != nullptr. + * (2) Input JPEG has EXIF. + * If both conditions are fulfilled, this method will return ERROR_JPEGR_INVALID_INPUT_TYPE + * + * @param primary_jpg_image_ptr destination of primary image + * @param gainmap_jpg_image_ptr destination of compressed gain map image + * @param (nullable) pExif EXIF package + * @param (nullable) pIcc ICC package + * @param icc_size length in bytes of ICC package + * @param metadata JPEG/R metadata to encode in XMP of the jpeg + * @param dest compressed JPEGR image + * @return NO_ERROR if calculation succeeds, error code if error occurs. + */ + status_t appendGainMap(jr_compressed_ptr primary_jpg_image_ptr, + jr_compressed_ptr gainmap_jpg_image_ptr, jr_exif_ptr pExif, void* pIcc, + size_t icc_size, ultrahdr_metadata_ptr metadata, jr_compressed_ptr dest); + + /* + * This method will tone map a HDR image to an SDR image. + * + * @param src pointer to uncompressed HDR image struct. HDR image is expected to be + * in p010 color format + * @param dest pointer to store tonemapped SDR image + */ + status_t toneMap(jr_uncompressed_ptr src, jr_uncompressed_ptr dest); + + /* + * This method will convert a YUV420 image from one YUV encoding to another in-place (eg. + * Bt.709 to Bt.601 YUV encoding). + * + * src_encoding and dest_encoding indicate the encoding via the YUV conversion defined for that + * gamut. P3 indicates Rec.601, since this is how DataSpace encodes Display-P3 YUV data. + * + * @param image the YUV420 image to convert + * @param src_encoding input YUV encoding + * @param dest_encoding output YUV encoding + * @return NO_ERROR if calculation succeeds, error code if error occurs. + */ + status_t convertYuv(jr_uncompressed_ptr image, ultrahdr_color_gamut src_encoding, + ultrahdr_color_gamut dest_encoding); + + /* + * This method will check the validity of the input arguments. + * + * @param p010_image_ptr uncompressed HDR image in P010 color format + * @param yuv420_image_ptr pointer to uncompressed SDR image struct. HDR image is expected to + * be in 420p color format + * @param hdr_tf transfer function of the HDR image + * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} + * represents the maximum available size of the desitination buffer, and it must be + * set before calling this method. If the encoded JPEGR size exceeds + * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. + * @return NO_ERROR if the input args are valid, error code is not valid. + */ + status_t areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, + jr_uncompressed_ptr yuv420_image_ptr, + ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest_ptr); + + /* + * This method will check the validity of the input arguments. + * + * @param p010_image_ptr uncompressed HDR image in P010 color format + * @param yuv420_image_ptr pointer to uncompressed SDR image struct. HDR image is expected to + * be in 420p color format + * @param hdr_tf transfer function of the HDR image + * @param dest destination of the compressed JPEGR image. 
Please note that {@code maxLength} + * represents the maximum available size of the destination buffer, and it must be + * set before calling this method. If the encoded JPEGR size exceeds + * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. + * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is + * the highest quality + * @return NO_ERROR if the input args are valid, error code is not valid. + */ + status_t areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, + jr_uncompressed_ptr yuv420_image_ptr, + ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest, + int quality); +}; +} // namespace ultrahdr + +#endif // ULTRAHDR_JPEGR_H diff --git a/lib/include/ultrahdr/jpegrutils.h b/lib/include/ultrahdr/jpegrutils.h new file mode 100644 index 0000000..d5bfa0b --- /dev/null +++ b/lib/include/ultrahdr/jpegrutils.h @@ -0,0 +1,152 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_JPEGRUTILS_H +#define ULTRAHDR_JPEGRUTILS_H + +#include "ultrahdr/ultrahdr.h" +#include "ultrahdr/jpegr.h" + +namespace ultrahdr { + +static constexpr uint32_t EndianSwap32(uint32_t value) { + return ((value & 0xFF) << 24) | ((value & 0xFF00) << 8) | ((value & 0xFF0000) >> 8) | + (value >> 24); +} +static inline uint16_t EndianSwap16(uint16_t value) { + return static_cast((value >> 8) | ((value & 0xFF) << 8)); +} + +struct ultrahdr_metadata_struct; +/* + * Mutable data structure. Holds information for metadata. + */ +class DataStruct { + private: + void* data; + int writePos; + int length; + + public: + DataStruct(int s); + ~DataStruct(); + + void* getData(); + int getLength(); + int getBytesWritten(); + bool write8(uint8_t value); + bool write16(uint16_t value); + bool write32(uint32_t value); + bool write(const void* src, int size); +}; + +/* + * Helper function used for writing data to destination. + * + * @param destination destination of the data to be written. + * @param source source of data being written. + * @param length length of the data to be written. + * @param position cursor in desitination where the data is to be written. + * @return status of succeed or error code. + */ +status_t Write(jr_compressed_ptr destination, const void* source, size_t length, int& position); + +/* + * Parses XMP packet and fills metadata with data from XMP + * + * @param xmp_data pointer to XMP packet + * @param xmp_size size of XMP packet + * @param metadata place to store HDR metadata values + * @return true if metadata is successfully retrieved, false otherwise + */ +bool getMetadataFromXMP(uint8_t* xmp_data, size_t xmp_size, ultrahdr_metadata_struct* metadata); + +/* + * This method generates XMP metadata for the primary image. 
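A small sketch of the byte-order helpers and the DataStruct writer declared above, with hypothetical tag values; it only shows that a 16-bit write plus a 32-bit write advances the cursor by six bytes:

    // Illustrative sketch, not part of the diff: EndianSwap32(0x11223344) == 0x44332211.
    #include "ultrahdr/jpegrutils.h"

    int writeBigEndianFieldsSketch() {
      ultrahdr::DataStruct payload(16);                  // hypothetical capacity
      payload.write16(ultrahdr::EndianSwap16(0xB000));   // e.g. an MPF tag id
      payload.write32(ultrahdr::EndianSwap32(4));        // e.g. a count field
      return payload.getBytesWritten();                  // 6
    }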
+ * + * below is an example of the XMP metadata that this function generates where + * secondary_image_length = 1000 + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * @param secondary_image_length length of secondary image + * @return XMP metadata in type of string + */ +std::string generateXmpForPrimaryImage(int secondary_image_length, + ultrahdr_metadata_struct& metadata); + +/* + * This method generates XMP metadata for the recovery map image. + * + * below is an example of the XMP metadata that this function generates where + * max_content_boost = 8.0 + * min_content_boost = 0.5 + * + * + * + * + * + * + * + * @param metadata JPEG/R metadata to encode as XMP + * @return XMP metadata in type of string + */ +std::string generateXmpForSecondaryImage(ultrahdr_metadata_struct& metadata); +} // namespace ultrahdr + +#endif // ULTRAHDR_JPEGRUTILS_H diff --git a/lib/include/ultrahdr/multipictureformat.h b/lib/include/ultrahdr/multipictureformat.h new file mode 100644 index 0000000..9a9141b --- /dev/null +++ b/lib/include/ultrahdr/multipictureformat.h @@ -0,0 +1,78 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_MULTIPICTUREFORMAT_H +#define ULTRAHDR_MULTIPICTUREFORMAT_H + +#include + +#ifndef USE_BIG_ENDIAN_IN_MPF +#define USE_BIG_ENDIAN_IN_MPF true +#endif + +#undef Endian_SwapBE32 +#undef Endian_SwapBE16 +#if USE_BIG_ENDIAN_IN_MPF +#define Endian_SwapBE32(n) EndianSwap32(n) +#define Endian_SwapBE16(n) EndianSwap16(n) +#else +#define Endian_SwapBE32(n) (n) +#define Endian_SwapBE16(n) (n) +#endif + +#include "ultrahdr/ultrahdr.h" +#include "ultrahdr/jpegr.h" +#include "ultrahdr/gainmapmath.h" +#include "ultrahdr/jpegrutils.h" + +namespace ultrahdr { + +constexpr size_t kNumPictures = 2; +constexpr size_t kMpEndianSize = 4; +constexpr uint16_t kTagSerializedCount = 3; +constexpr uint32_t kTagSize = 12; + +constexpr uint16_t kTypeLong = 0x4; +constexpr uint16_t kTypeUndefined = 0x7; + +static constexpr uint8_t kMpfSig[] = {'M', 'P', 'F', '\0'}; +constexpr uint8_t kMpLittleEndian[kMpEndianSize] = {0x49, 0x49, 0x2A, 0x00}; +constexpr uint8_t kMpBigEndian[kMpEndianSize] = {0x4D, 0x4D, 0x00, 0x2A}; + +constexpr uint16_t kVersionTag = 0xB000; +constexpr uint16_t kVersionType = kTypeUndefined; +constexpr uint32_t kVersionCount = 4; +constexpr size_t kVersionSize = 4; +constexpr uint8_t kVersionExpected[kVersionSize] = {'0', '1', '0', '0'}; + +constexpr uint16_t kNumberOfImagesTag = 0xB001; +constexpr uint16_t kNumberOfImagesType = kTypeLong; +constexpr uint32_t kNumberOfImagesCount = 1; + +constexpr uint16_t kMPEntryTag = 0xB002; +constexpr uint16_t kMPEntryType = kTypeUndefined; +constexpr uint32_t kMPEntrySize = 16; + +constexpr uint32_t kMPEntryAttributeFormatJpeg = 0x0000000; +constexpr uint32_t kMPEntryAttributeTypePrimary = 0x030000; + +size_t calculateMpfSize(); +std::shared_ptr generateMpf(int primary_image_size, int primary_image_offset, + int secondary_image_size, int 
secondary_image_offset); + +} // namespace ultrahdr + +#endif // ULTRAHDR_MULTIPICTUREFORMAT_H diff --git a/lib/include/ultrahdr/ultrahdr.h b/lib/include/ultrahdr/ultrahdr.h new file mode 100644 index 0000000..fa69d57 --- /dev/null +++ b/lib/include/ultrahdr/ultrahdr.h @@ -0,0 +1,82 @@ +/* + * Copyright 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_ULTRAHDR_H +#define ULTRAHDR_ULTRAHDR_H + +#include + +namespace ultrahdr { +// Color gamuts for image data +typedef enum { + ULTRAHDR_COLORGAMUT_UNSPECIFIED = -1, + ULTRAHDR_COLORGAMUT_BT709, + ULTRAHDR_COLORGAMUT_P3, + ULTRAHDR_COLORGAMUT_BT2100, + ULTRAHDR_COLORGAMUT_MAX = ULTRAHDR_COLORGAMUT_BT2100, +} ultrahdr_color_gamut; + +// Transfer functions for image data +// TODO: TF LINEAR is deprecated, remove this enum and the code surrounding it. +typedef enum { + ULTRAHDR_TF_UNSPECIFIED = -1, + ULTRAHDR_TF_LINEAR = 0, + ULTRAHDR_TF_HLG = 1, + ULTRAHDR_TF_PQ = 2, + ULTRAHDR_TF_SRGB = 3, + ULTRAHDR_TF_MAX = ULTRAHDR_TF_SRGB, +} ultrahdr_transfer_function; + +// Target output formats for decoder +typedef enum { + ULTRAHDR_OUTPUT_UNSPECIFIED = -1, + ULTRAHDR_OUTPUT_SDR, // SDR in RGBA_8888 color format + ULTRAHDR_OUTPUT_HDR_LINEAR, // HDR in F16 color format (linear) + ULTRAHDR_OUTPUT_HDR_PQ, // HDR in RGBA_1010102 color format (PQ transfer function) + ULTRAHDR_OUTPUT_HDR_HLG, // HDR in RGBA_1010102 color format (HLG transfer function) + ULTRAHDR_OUTPUT_MAX = ULTRAHDR_OUTPUT_HDR_HLG, +} ultrahdr_output_format; + +/* + * Holds information for gain map related metadata. + * + * Not: all values stored in linear. This differs from the metadata encoding in XMP, where + * maxContentBoost (aka gainMapMax), minContentBoost (aka gainMapMin), hdrCapacityMin, and + * hdrCapacityMax are stored in log2 space. + */ +struct ultrahdr_metadata_struct { + // Ultra HDR format version + std::string version; + // Max Content Boost for the map + float maxContentBoost; + // Min Content Boost for the map + float minContentBoost; + // Gamma of the map data + float gamma; + // Offset for SDR data in map calculations + float offsetSdr; + // Offset for HDR data in map calculations + float offsetHdr; + // HDR capacity to apply the map at all + float hdrCapacityMin; + // HDR capacity to apply the map completely + float hdrCapacityMax; +}; +typedef struct ultrahdr_metadata_struct* ultrahdr_metadata_ptr; + +} // namespace ultrahdr + +#endif // ULTRAHDR_ULTRAHDR_H diff --git a/lib/include/ultrahdr/ultrahdrcommon.h b/lib/include/ultrahdr/ultrahdrcommon.h new file mode 100644 index 0000000..ba3a3b8 --- /dev/null +++ b/lib/include/ultrahdr/ultrahdrcommon.h @@ -0,0 +1,64 @@ +/* + * Copyright 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
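The linear-versus-log2 convention called out in the metadata struct comment above is worth a one-line illustration; the boost values here are hypothetical:

    // Illustrative sketch, not part of the diff: this struct holds linear values, while XMP
    // stores GainMapMax/GainMapMin and the HDR capacities in log2 space.
    #include <cmath>
    #include "ultrahdr/ultrahdr.h"

    float gainMapMaxForXmpSketch() {
      ultrahdr::ultrahdr_metadata_struct metadata{};
      metadata.maxContentBoost = 8.0f;              // a linear boost of 8x ...
      return std::log2(metadata.maxContentBoost);   // ... becomes 3.0 when encoded into XMP
    }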
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ULTRAHDR_ULTRAHDRCOMMON_H +#define ULTRAHDR_ULTRAHDRCOMMON_H + +//#define LOG_NDEBUG 0 + +#ifdef __ANDROID__ +#include "log/log.h" +#else +#ifdef LOG_NDEBUG +#include + +#define ALOGD(...) \ + do { \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + } while (0) +#define ALOGE(...) \ + do { \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + } while (0) +#define ALOGI(...) \ + do { \ + fprintf(stdout, __VA_ARGS__); \ + fprintf(stdout, "\n"); \ + } while (0) +#define ALOGV(...) \ + do { \ + fprintf(stdout, __VA_ARGS__); \ + fprintf(stdout, "\n"); \ + } while (0) +#define ALOGW(...) \ + do { \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + } while (0) +#else +#define ALOGD(...) ((void)0) +#define ALOGE(...) ((void)0) +#define ALOGI(...) ((void)0) +#define ALOGV(...) ((void)0) +#define ALOGW(...) ((void)0) +#endif +#endif + +#define ALIGNM(x, m) ((((x) + ((m)-1)) / (m)) * (m)) + +#endif // ULTRAHDR_ULTRAHDRCOMMON_H diff --git a/lib/jpegdecoderhelper.cpp b/lib/jpegdecoderhelper.cpp deleted file mode 100644 index eb55a2e..0000000 --- a/lib/jpegdecoderhelper.cpp +++ /dev/null @@ -1,537 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
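ALIGNM above rounds its first argument up to the next multiple of the second, which the scanline batching in the decoder and encoder relies on; two compile-time checks illustrate it (not part of the change):

    // Illustrative check, not part of the diff: ALIGNM rounds up to a multiple of m.
    #include "ultrahdr/ultrahdrcommon.h"

    static_assert(ALIGNM(1000, 16) == 1008, "1000 rounds up to 1008");
    static_assert(ALIGNM(1024, 16) == 1024, "already-aligned values are unchanged");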
- */ - -#include -#include - -#include - -#include "ultrahdrcommon.h" -#include "ultrahdr.h" -#include "jpegdecoderhelper.h" - -using namespace std; - -namespace ultrahdr { - -const uint32_t kAPP0Marker = JPEG_APP0; // JFIF -const uint32_t kAPP1Marker = JPEG_APP0 + 1; // EXIF, XMP -const uint32_t kAPP2Marker = JPEG_APP0 + 2; // ICC - -constexpr uint32_t kICCMarkerHeaderSize = 14; -constexpr uint8_t kICCSig[] = { - 'I', 'C', 'C', '_', 'P', 'R', 'O', 'F', 'I', 'L', 'E', '\0', -}; -constexpr uint8_t kXmpNameSpace[] = { - 'h', 't', 't', 'p', ':', '/', '/', 'n', 's', '.', 'a', 'd', 'o', 'b', 'e', - '.', 'c', 'o', 'm', '/', 'x', 'a', 'p', '/', '1', '.', '0', '/', '\0', -}; -constexpr uint8_t kExifIdCode[] = { - 'E', 'x', 'i', 'f', '\0', '\0', -}; - -struct jpegr_source_mgr : jpeg_source_mgr { - jpegr_source_mgr(const uint8_t* ptr, int len); - ~jpegr_source_mgr(); - - const uint8_t* mBufferPtr; - size_t mBufferLength; -}; - -struct jpegrerror_mgr { - struct jpeg_error_mgr pub; - jmp_buf setjmp_buffer; -}; - -static void jpegr_init_source(j_decompress_ptr cinfo) { - jpegr_source_mgr* src = static_cast(cinfo->src); - src->next_input_byte = static_cast(src->mBufferPtr); - src->bytes_in_buffer = src->mBufferLength; -} - -static boolean jpegr_fill_input_buffer(j_decompress_ptr /* cinfo */) { - ALOGE("%s : should not get here", __func__); - return FALSE; -} - -static void jpegr_skip_input_data(j_decompress_ptr cinfo, long num_bytes) { - jpegr_source_mgr* src = static_cast(cinfo->src); - - if (num_bytes > static_cast(src->bytes_in_buffer)) { - ALOGE("jpegr_skip_input_data - num_bytes > (long)src->bytes_in_buffer"); - } else { - src->next_input_byte += num_bytes; - src->bytes_in_buffer -= num_bytes; - } -} - -static void jpegr_term_source(j_decompress_ptr /*cinfo*/) {} - -jpegr_source_mgr::jpegr_source_mgr(const uint8_t* ptr, int len) - : mBufferPtr(ptr), mBufferLength(len) { - init_source = jpegr_init_source; - fill_input_buffer = jpegr_fill_input_buffer; - skip_input_data = jpegr_skip_input_data; - resync_to_restart = jpeg_resync_to_restart; - term_source = jpegr_term_source; -} - -jpegr_source_mgr::~jpegr_source_mgr() {} - -static void jpegrerror_exit(j_common_ptr cinfo) { - jpegrerror_mgr* err = reinterpret_cast(cinfo->err); - longjmp(err->setjmp_buffer, 1); -} - -static void output_message(j_common_ptr cinfo) { - char buffer[JMSG_LENGTH_MAX]; - - /* Create the message */ - (*cinfo->err->format_message)(cinfo, buffer); - ALOGE("%s\n", buffer); -} - -JpegDecoderHelper::JpegDecoderHelper() {} - -JpegDecoderHelper::~JpegDecoderHelper() {} - -bool JpegDecoderHelper::decompressImage(const void* image, int length, bool decodeToRGBA) { - if (image == nullptr || length <= 0) { - ALOGE("Image size can not be handled: %d", length); - return false; - } - mResultBuffer.clear(); - mXMPBuffer.clear(); - return decode(image, length, decodeToRGBA); -} - -void* JpegDecoderHelper::getDecompressedImagePtr() { return mResultBuffer.data(); } - -size_t JpegDecoderHelper::getDecompressedImageSize() { return mResultBuffer.size(); } - -void* JpegDecoderHelper::getXMPPtr() { return mXMPBuffer.data(); } - -size_t JpegDecoderHelper::getXMPSize() { return mXMPBuffer.size(); } - -void* JpegDecoderHelper::getEXIFPtr() { return mEXIFBuffer.data(); } - -size_t JpegDecoderHelper::getEXIFSize() { return mEXIFBuffer.size(); } - -void* JpegDecoderHelper::getICCPtr() { return mICCBuffer.data(); } - -size_t JpegDecoderHelper::getICCSize() { return mICCBuffer.size(); } - -size_t JpegDecoderHelper::getDecompressedImageWidth() { return 
mWidth; } - -size_t JpegDecoderHelper::getDecompressedImageHeight() { return mHeight; } - -// Here we only handle the first EXIF package, and in theary EXIF (or JFIF) must be the first -// in the image file. -// We assume that all packages are starting with two bytes marker (eg FF E1 for EXIF package), -// two bytes of package length which is stored in marker->original_length, and the real data -// which is stored in marker->data. -bool JpegDecoderHelper::extractEXIF(const void* image, int length) { - jpeg_decompress_struct cinfo; - jpegr_source_mgr mgr(static_cast(image), length); - jpegrerror_mgr myerr; - - cinfo.err = jpeg_std_error(&myerr.pub); - myerr.pub.error_exit = jpegrerror_exit; - myerr.pub.output_message = output_message; - - if (setjmp(myerr.setjmp_buffer)) { - jpeg_destroy_decompress(&cinfo); - return false; - } - jpeg_create_decompress(&cinfo); - - jpeg_save_markers(&cinfo, kAPP0Marker, 0xFFFF); - jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF); - - cinfo.src = &mgr; - jpeg_read_header(&cinfo, TRUE); - - size_t pos = 2; // position after SOI - for (jpeg_marker_struct* marker = cinfo.marker_list; marker; marker = marker->next) { - pos += 4; - pos += marker->original_length; - - if (marker->marker != kAPP1Marker) { - continue; - } - - const unsigned int len = marker->data_length; - - if (len > sizeof(kExifIdCode) && !memcmp(marker->data, kExifIdCode, sizeof(kExifIdCode))) { - mEXIFBuffer.resize(len, 0); - memcpy(static_cast(mEXIFBuffer.data()), marker->data, len); - mExifPos = pos - marker->original_length; - break; - } - } - - jpeg_destroy_decompress(&cinfo); - return true; -} - -bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA) { - bool status = true; - jpeg_decompress_struct cinfo; - jpegrerror_mgr myerr; - cinfo.err = jpeg_std_error(&myerr.pub); - myerr.pub.error_exit = jpegrerror_exit; - myerr.pub.output_message = output_message; - - if (setjmp(myerr.setjmp_buffer)) { - jpeg_destroy_decompress(&cinfo); - return false; - } - - jpeg_create_decompress(&cinfo); - - jpeg_save_markers(&cinfo, kAPP0Marker, 0xFFFF); - jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF); - jpeg_save_markers(&cinfo, kAPP2Marker, 0xFFFF); - - jpegr_source_mgr mgr(static_cast(image), length); - cinfo.src = &mgr; - if (jpeg_read_header(&cinfo, TRUE) != JPEG_HEADER_OK) { - jpeg_destroy_decompress(&cinfo); - return false; - } - - // Save XMP data, EXIF data, and ICC data. - // Here we only handle the first XMP / EXIF / ICC package. - // We assume that all packages are starting with two bytes marker (eg FF E1 for EXIF package), - // two bytes of package length which is stored in marker->original_length, and the real data - // which is stored in marker->data. 
- bool exifAppears = false; - bool xmpAppears = false; - bool iccAppears = false; - size_t pos = 2; // position after SOI - for (jpeg_marker_struct* marker = cinfo.marker_list; - marker && !(exifAppears && xmpAppears && iccAppears); marker = marker->next) { - pos += 4; - pos += marker->original_length; - if (marker->marker != kAPP1Marker && marker->marker != kAPP2Marker) { - continue; - } - const unsigned int len = marker->data_length; - if (!xmpAppears && len > sizeof(kXmpNameSpace) && - !memcmp(marker->data, kXmpNameSpace, sizeof(kXmpNameSpace))) { - mXMPBuffer.resize(len + 1, 0); - memcpy(static_cast(mXMPBuffer.data()), marker->data, len); - xmpAppears = true; - } else if (!exifAppears && len > sizeof(kExifIdCode) && - !memcmp(marker->data, kExifIdCode, sizeof(kExifIdCode))) { - mEXIFBuffer.resize(len, 0); - memcpy(static_cast(mEXIFBuffer.data()), marker->data, len); - exifAppears = true; - mExifPos = pos - marker->original_length; - } else if (!iccAppears && len > sizeof(kICCSig) && - !memcmp(marker->data, kICCSig, sizeof(kICCSig))) { - mICCBuffer.resize(len, 0); - memcpy(static_cast(mICCBuffer.data()), marker->data, len); - iccAppears = true; - } - } - - mWidth = cinfo.image_width; - mHeight = cinfo.image_height; - if (mWidth > kMaxWidth || mHeight > kMaxHeight) { - status = false; - goto CleanUp; - } - - if (decodeToRGBA) { - // The primary image is expected to be yuv420 sampling - if (cinfo.jpeg_color_space != JCS_YCbCr) { - status = false; - ALOGE("%s: decodeToRGBA unexpected jpeg color space ", __func__); - goto CleanUp; - } - if (cinfo.comp_info[0].h_samp_factor != 2 || cinfo.comp_info[0].v_samp_factor != 2 || - cinfo.comp_info[1].h_samp_factor != 1 || cinfo.comp_info[1].v_samp_factor != 1 || - cinfo.comp_info[2].h_samp_factor != 1 || cinfo.comp_info[2].v_samp_factor != 1) { - status = false; - ALOGE("%s: decodeToRGBA unexpected primary image sub-sampling", __func__); - goto CleanUp; - } - // 4 bytes per pixel - mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 4); - cinfo.out_color_space = JCS_EXT_RGBA; - } else { - if (cinfo.jpeg_color_space == JCS_YCbCr) { - if (cinfo.comp_info[0].h_samp_factor != 2 || cinfo.comp_info[0].v_samp_factor != 2 || - cinfo.comp_info[1].h_samp_factor != 1 || cinfo.comp_info[1].v_samp_factor != 1 || - cinfo.comp_info[2].h_samp_factor != 1 || cinfo.comp_info[2].v_samp_factor != 1) { - status = false; - ALOGE("%s: decoding to YUV only supports 4:2:0 subsampling", __func__); - goto CleanUp; - } - mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 3 / 2, 0); - } else if (cinfo.jpeg_color_space == JCS_GRAYSCALE) { - mResultBuffer.resize(cinfo.image_width * cinfo.image_height, 0); - } else { - status = false; - ALOGE("%s: decodeToYUV unexpected jpeg color space", __func__); - goto CleanUp; - } - cinfo.out_color_space = cinfo.jpeg_color_space; - cinfo.raw_data_out = TRUE; - } - - cinfo.dct_method = JDCT_ISLOW; - jpeg_start_decompress(&cinfo); - if (!decompress(&cinfo, static_cast(mResultBuffer.data()), - cinfo.jpeg_color_space == JCS_GRAYSCALE)) { - status = false; - goto CleanUp; - } - -CleanUp: - jpeg_finish_decompress(&cinfo); - jpeg_destroy_decompress(&cinfo); - - return status; -} - -bool JpegDecoderHelper::decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest, - bool isSingleChannel) { - return isSingleChannel ? decompressSingleChannel(cinfo, dest) - : ((cinfo->out_color_space == JCS_EXT_RGBA) ? 
decompressRGBA(cinfo, dest) - : decompressYUV(cinfo, dest)); -} - -bool JpegDecoderHelper::getCompressedImageParameters(const void* image, int length, size_t* pWidth, - size_t* pHeight, std::vector* iccData, - std::vector* exifData) { - jpeg_decompress_struct cinfo; - jpegrerror_mgr myerr; - cinfo.err = jpeg_std_error(&myerr.pub); - myerr.pub.error_exit = jpegrerror_exit; - myerr.pub.output_message = output_message; - - if (setjmp(myerr.setjmp_buffer)) { - jpeg_destroy_decompress(&cinfo); - return false; - } - jpeg_create_decompress(&cinfo); - - jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF); - jpeg_save_markers(&cinfo, kAPP2Marker, 0xFFFF); - - jpegr_source_mgr mgr(static_cast(image), length); - cinfo.src = &mgr; - if (jpeg_read_header(&cinfo, TRUE) != JPEG_HEADER_OK) { - jpeg_destroy_decompress(&cinfo); - return false; - } - - if (pWidth != nullptr) { - *pWidth = cinfo.image_width; - } - if (pHeight != nullptr) { - *pHeight = cinfo.image_height; - } - - if (iccData != nullptr) { - for (jpeg_marker_struct* marker = cinfo.marker_list; marker; marker = marker->next) { - if (marker->marker != kAPP2Marker) { - continue; - } - if (marker->data_length <= kICCMarkerHeaderSize || - memcmp(marker->data, kICCSig, sizeof(kICCSig)) != 0) { - continue; - } - - iccData->insert(iccData->end(), marker->data, marker->data + marker->data_length); - } - } - - if (exifData != nullptr) { - bool exifAppears = false; - for (jpeg_marker_struct* marker = cinfo.marker_list; marker && !exifAppears; - marker = marker->next) { - if (marker->marker != kAPP1Marker) { - continue; - } - - const unsigned int len = marker->data_length; - if (len >= sizeof(kExifIdCode) && !memcmp(marker->data, kExifIdCode, sizeof(kExifIdCode))) { - exifData->resize(len, 0); - memcpy(static_cast(exifData->data()), marker->data, len); - exifAppears = true; - } - } - } - - jpeg_destroy_decompress(&cinfo); - return true; -} - -bool JpegDecoderHelper::decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest) { - JSAMPLE* out = (JSAMPLE*)dest; - - while (cinfo->output_scanline < cinfo->image_height) { - if (1 != jpeg_read_scanlines(cinfo, &out, 1)) return false; - out += cinfo->image_width * 4; - } - return true; -} - -bool JpegDecoderHelper::decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest) { - size_t luma_plane_size = cinfo->image_width * cinfo->image_height; - size_t chroma_plane_size = luma_plane_size / 4; - uint8_t* y_plane = const_cast(dest); - uint8_t* u_plane = const_cast(dest + luma_plane_size); - uint8_t* v_plane = const_cast(dest + luma_plane_size + chroma_plane_size); - - const size_t aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize); - const bool is_width_aligned = (aligned_width == cinfo->image_width); - uint8_t* y_plane_intrm = nullptr; - uint8_t* u_plane_intrm = nullptr; - uint8_t* v_plane_intrm = nullptr; - - JSAMPROW y[kCompressBatchSize]; - JSAMPROW cb[kCompressBatchSize / 2]; - JSAMPROW cr[kCompressBatchSize / 2]; - JSAMPARRAY planes[3]{y, cb, cr}; - JSAMPROW y_intrm[kCompressBatchSize]; - JSAMPROW cb_intrm[kCompressBatchSize / 2]; - JSAMPROW cr_intrm[kCompressBatchSize / 2]; - JSAMPARRAY planes_intrm[3]{y_intrm, cb_intrm, cr_intrm}; - - if (cinfo->image_height % kCompressBatchSize != 0) { - mEmpty = std::make_unique(aligned_width); - } - - if (!is_width_aligned) { - size_t mcu_row_size = aligned_width * kCompressBatchSize * 3 / 2; - mBufferIntermediate = std::make_unique(mcu_row_size); - y_plane_intrm = mBufferIntermediate.get(); - u_plane_intrm = y_plane_intrm + (aligned_width * 
kCompressBatchSize); - v_plane_intrm = u_plane_intrm + (aligned_width * kCompressBatchSize) / 4; - for (int i = 0; i < kCompressBatchSize; ++i) { - y_intrm[i] = y_plane_intrm + i * aligned_width; - } - for (int i = 0; i < kCompressBatchSize / 2; ++i) { - int offset_intrm = i * (aligned_width / 2); - cb_intrm[i] = u_plane_intrm + offset_intrm; - cr_intrm[i] = v_plane_intrm + offset_intrm; - } - } - - while (cinfo->output_scanline < cinfo->image_height) { - size_t scanline_copy = cinfo->output_scanline; - for (int i = 0; i < kCompressBatchSize; ++i) { - size_t scanline = cinfo->output_scanline + i; - if (scanline < cinfo->image_height) { - y[i] = y_plane + scanline * cinfo->image_width; - } else { - y[i] = mEmpty.get(); - } - } - // cb, cr only have half scanlines - for (int i = 0; i < kCompressBatchSize / 2; ++i) { - size_t scanline = cinfo->output_scanline / 2 + i; - if (scanline < cinfo->image_height / 2) { - int offset = scanline * (cinfo->image_width / 2); - cb[i] = u_plane + offset; - cr[i] = v_plane + offset; - } else { - cb[i] = cr[i] = mEmpty.get(); - } - } - - int processed = - jpeg_read_raw_data(cinfo, is_width_aligned ? planes : planes_intrm, kCompressBatchSize); - if (processed != kCompressBatchSize) { - ALOGE("Number of processed lines does not equal input lines."); - return false; - } - if (!is_width_aligned) { - for (int i = 0; i < kCompressBatchSize; ++i) { - if (scanline_copy + i < cinfo->image_height) { - memcpy(y[i], y_intrm[i], cinfo->image_width); - } - } - for (int i = 0; i < kCompressBatchSize / 2; ++i) { - if (((scanline_copy / 2) + i) < (cinfo->image_height / 2)) { - memcpy(cb[i], cb_intrm[i], cinfo->image_width / 2); - memcpy(cr[i], cr_intrm[i], cinfo->image_width / 2); - } - } - } - } - return true; -} - -bool JpegDecoderHelper::decompressSingleChannel(jpeg_decompress_struct* cinfo, - const uint8_t* dest) { - uint8_t* y_plane = const_cast(dest); - uint8_t* y_plane_intrm = nullptr; - - const size_t aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize); - const bool is_width_aligned = (aligned_width == cinfo->image_width); - - JSAMPROW y[kCompressBatchSize]; - JSAMPARRAY planes[1]{y}; - JSAMPROW y_intrm[kCompressBatchSize]; - JSAMPARRAY planes_intrm[1]{y_intrm}; - - if (cinfo->image_height % kCompressBatchSize != 0) { - mEmpty = std::make_unique(aligned_width); - } - - if (!is_width_aligned) { - size_t mcu_row_size = aligned_width * kCompressBatchSize; - mBufferIntermediate = std::make_unique(mcu_row_size); - y_plane_intrm = mBufferIntermediate.get(); - for (int i = 0; i < kCompressBatchSize; ++i) { - y_intrm[i] = y_plane_intrm + i * aligned_width; - } - } - - while (cinfo->output_scanline < cinfo->image_height) { - size_t scanline_copy = cinfo->output_scanline; - for (int i = 0; i < kCompressBatchSize; ++i) { - size_t scanline = cinfo->output_scanline + i; - if (scanline < cinfo->image_height) { - y[i] = y_plane + scanline * cinfo->image_width; - } else { - y[i] = mEmpty.get(); - } - } - - int processed = - jpeg_read_raw_data(cinfo, is_width_aligned ? 
planes : planes_intrm, kCompressBatchSize); - if (processed != kCompressBatchSize / 2) { - ALOGE("Number of processed lines does not equal input lines."); - return false; - } - if (!is_width_aligned) { - for (int i = 0; i < kCompressBatchSize; ++i) { - if (scanline_copy + i < cinfo->image_height) { - memcpy(y[i], y_intrm[i], cinfo->image_width); - } - } - } - } - return true; -} - -} // namespace ultrahdr diff --git a/lib/jpegdecoderhelper.h b/lib/jpegdecoderhelper.h deleted file mode 100644 index 01a05e4..0000000 --- a/lib/jpegdecoderhelper.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ULTRAHDR_JPEGDECODERHELPER_H -#define ULTRAHDR_JPEGDECODERHELPER_H - -#include // For jpeglib.h. - -// C++ build requires extern C for jpeg internals. -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#ifdef __cplusplus -} // extern "C" -#endif - -#include -#include -#include - -// constraint on max width and max height is only due to device alloc constraints -// Can tune these values basing on the target device -static const int kMaxWidth = 8192; -static const int kMaxHeight = 8192; - -namespace ultrahdr { -/* - * Encapsulates a converter from JPEG to raw image (YUV420planer or grey-scale) format. - * This class is not thread-safe. - */ -class JpegDecoderHelper { - public: - JpegDecoderHelper(); - ~JpegDecoderHelper(); - /* - * Decompresses JPEG image to raw image (YUV420planer, grey-scale or RGBA) format. After - * calling this method, call getDecompressedImage() to get the image. - * Returns false if decompressing the image fails. - */ - bool decompressImage(const void* image, int length, bool decodeToRGBA = false); - /* - * Returns the decompressed raw image buffer pointer. This method must be called only after - * calling decompressImage(). - */ - void* getDecompressedImagePtr(); - /* - * Returns the decompressed raw image buffer size. This method must be called only after - * calling decompressImage(). - */ - size_t getDecompressedImageSize(); - /* - * Returns the image width in pixels. This method must be called only after calling - * decompressImage(). - */ - size_t getDecompressedImageWidth(); - /* - * Returns the image width in pixels. This method must be called only after calling - * decompressImage(). - */ - size_t getDecompressedImageHeight(); - /* - * Returns the XMP data from the image. - */ - void* getXMPPtr(); - /* - * Returns the decompressed XMP buffer size. This method must be called only after - * calling decompressImage() or getCompressedImageParameters(). - */ - size_t getXMPSize(); - /* - * Extracts EXIF package and updates the EXIF position / length without decoding the image. - */ - bool extractEXIF(const void* image, int length); - /* - * Returns the EXIF data from the image. - * This method must be called after extractEXIF() or decompressImage(). - */ - void* getEXIFPtr(); - /* - * Returns the decompressed EXIF buffer size. 
This method must be called only after - * calling decompressImage(), extractEXIF() or getCompressedImageParameters(). - */ - size_t getEXIFSize(); - /* - * Returns the position offset of EXIF package - * (4 bypes offset to FF sign, the byte after FF E1 XX XX ), - * or -1 if no EXIF exists. - * This method must be called after extractEXIF() or decompressImage(). - */ - int getEXIFPos() { return mExifPos; } - /* - * Returns the ICC data from the image. - */ - void* getICCPtr(); - /* - * Returns the decompressed ICC buffer size. This method must be called only after - * calling decompressImage() or getCompressedImageParameters(). - */ - size_t getICCSize(); - /* - * Decompresses metadata of the image. All vectors are owned by the caller. - */ - bool getCompressedImageParameters(const void* image, int length, size_t* pWidth, size_t* pHeight, - std::vector* iccData, std::vector* exifData); - - private: - bool decode(const void* image, int length, bool decodeToRGBA); - // Returns false if errors occur. - bool decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest, bool isSingleChannel); - bool decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest); - bool decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest); - bool decompressSingleChannel(jpeg_decompress_struct* cinfo, const uint8_t* dest); - // Process 16 lines of Y and 16 lines of U/V each time. - // We must pass at least 16 scanlines according to libjpeg documentation. - static const int kCompressBatchSize = 16; - // The buffer that holds the decompressed result. - std::vector mResultBuffer; - // The buffer that holds XMP Data. - std::vector mXMPBuffer; - // The buffer that holds EXIF Data. - std::vector mEXIFBuffer; - // The buffer that holds ICC Data. - std::vector mICCBuffer; - - // Resolution of the decompressed image. - size_t mWidth; - size_t mHeight; - - // Position of EXIF package, default value is -1 which means no EXIF package appears. - int mExifPos = -1; - - std::unique_ptr mEmpty = nullptr; - std::unique_ptr mBufferIntermediate = nullptr; -}; -} /* namespace ultrahdr */ - -#endif // ULTRAHDR_JPEGDECODERHELPER_H diff --git a/lib/jpegencoderhelper.cpp b/lib/jpegencoderhelper.cpp deleted file mode 100644 index c6f0b77..0000000 --- a/lib/jpegencoderhelper.cpp +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include - -#include "ultrahdrcommon.h" -#include "ultrahdr.h" -#include "jpegencoderhelper.h" - -namespace ultrahdr { - -// The destination manager that can access |mResultBuffer| in JpegEncoderHelper. 
-struct destination_mgr { - struct jpeg_destination_mgr mgr; - JpegEncoderHelper* encoder; -}; - -JpegEncoderHelper::JpegEncoderHelper() {} - -JpegEncoderHelper::~JpegEncoderHelper() {} - -bool JpegEncoderHelper::compressImage(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, - int height, int lumaStride, int chromaStride, int quality, - const void* iccBuffer, unsigned int iccSize) { - mResultBuffer.clear(); - if (!encode(yBuffer, uvBuffer, width, height, lumaStride, chromaStride, quality, iccBuffer, - iccSize)) { - return false; - } - ALOGV("Compressed JPEG: %d[%dx%d] -> %zu bytes", (width * height * 12) / 8, width, height, - mResultBuffer.size()); - return true; -} - -void* JpegEncoderHelper::getCompressedImagePtr() { return mResultBuffer.data(); } - -size_t JpegEncoderHelper::getCompressedImageSize() { return mResultBuffer.size(); } - -void JpegEncoderHelper::initDestination(j_compress_ptr cinfo) { - destination_mgr* dest = reinterpret_cast(cinfo->dest); - std::vector& buffer = dest->encoder->mResultBuffer; - buffer.resize(kBlockSize); - dest->mgr.next_output_byte = &buffer[0]; - dest->mgr.free_in_buffer = buffer.size(); -} - -boolean JpegEncoderHelper::emptyOutputBuffer(j_compress_ptr cinfo) { - destination_mgr* dest = reinterpret_cast(cinfo->dest); - std::vector& buffer = dest->encoder->mResultBuffer; - size_t oldsize = buffer.size(); - buffer.resize(oldsize + kBlockSize); - dest->mgr.next_output_byte = &buffer[oldsize]; - dest->mgr.free_in_buffer = kBlockSize; - return true; -} - -void JpegEncoderHelper::terminateDestination(j_compress_ptr cinfo) { - destination_mgr* dest = reinterpret_cast(cinfo->dest); - std::vector& buffer = dest->encoder->mResultBuffer; - buffer.resize(buffer.size() - dest->mgr.free_in_buffer); -} - -void JpegEncoderHelper::outputErrorMessage(j_common_ptr cinfo) { - char buffer[JMSG_LENGTH_MAX]; - - /* Create the message */ - (*cinfo->err->format_message)(cinfo, buffer); - ALOGE("%s\n", buffer); -} - -bool JpegEncoderHelper::encode(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, - int height, int lumaStride, int chromaStride, int quality, - const void* iccBuffer, unsigned int iccSize) { - jpeg_compress_struct cinfo; - jpeg_error_mgr jerr; - - cinfo.err = jpeg_std_error(&jerr); - cinfo.err->output_message = &outputErrorMessage; - jpeg_create_compress(&cinfo); - setJpegDestination(&cinfo); - setJpegCompressStruct(width, height, quality, &cinfo, uvBuffer == nullptr); - jpeg_start_compress(&cinfo, TRUE); - if (iccBuffer != nullptr && iccSize > 0) { - jpeg_write_marker(&cinfo, JPEG_APP0 + 2, static_cast(iccBuffer), iccSize); - } - bool status = cinfo.num_components == 1 - ? compressY(&cinfo, yBuffer, lumaStride) - : compressYuv(&cinfo, yBuffer, uvBuffer, lumaStride, chromaStride); - jpeg_finish_compress(&cinfo); - jpeg_destroy_compress(&cinfo); - - return status; -} - -void JpegEncoderHelper::setJpegDestination(jpeg_compress_struct* cinfo) { - destination_mgr* dest = static_cast( - (*cinfo->mem->alloc_small)((j_common_ptr)cinfo, JPOOL_PERMANENT, sizeof(destination_mgr))); - dest->encoder = this; - dest->mgr.init_destination = &initDestination; - dest->mgr.empty_output_buffer = &emptyOutputBuffer; - dest->mgr.term_destination = &terminateDestination; - cinfo->dest = reinterpret_cast(dest); -} - -void JpegEncoderHelper::setJpegCompressStruct(int width, int height, int quality, - jpeg_compress_struct* cinfo, bool isSingleChannel) { - cinfo->image_width = width; - cinfo->image_height = height; - cinfo->input_components = isSingleChannel ? 
1 : 3; - cinfo->in_color_space = isSingleChannel ? JCS_GRAYSCALE : JCS_YCbCr; - jpeg_set_defaults(cinfo); - jpeg_set_quality(cinfo, quality, TRUE); - cinfo->raw_data_in = TRUE; - cinfo->dct_method = JDCT_ISLOW; - cinfo->comp_info[0].h_samp_factor = cinfo->in_color_space == JCS_GRAYSCALE ? 1 : 2; - cinfo->comp_info[0].v_samp_factor = cinfo->in_color_space == JCS_GRAYSCALE ? 1 : 2; - for (int i = 1; i < cinfo->num_components; i++) { - cinfo->comp_info[i].h_samp_factor = 1; - cinfo->comp_info[i].v_samp_factor = 1; - } -} - -bool JpegEncoderHelper::compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, - const uint8_t* uvBuffer, int lumaStride, int chromaStride) { - size_t chroma_plane_size = chromaStride * cinfo->image_height / 2; - uint8_t* y_plane = const_cast(yBuffer); - uint8_t* u_plane = const_cast(uvBuffer); - uint8_t* v_plane = const_cast(u_plane + chroma_plane_size); - - const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize); - const bool need_luma_padding = (lumaStride < aligned_width); - const int aligned_chroma_width = ALIGNM(cinfo->image_width / 2, kCompressBatchSize / 2); - const bool need_chroma_padding = (chromaStride < aligned_chroma_width); - - std::unique_ptr empty = nullptr; - std::unique_ptr y_mcu_row = nullptr; - std::unique_ptr cb_mcu_row = nullptr; - std::unique_ptr cr_mcu_row = nullptr; - uint8_t* y_mcu_row_ptr = nullptr; - uint8_t* cb_mcu_row_ptr = nullptr; - uint8_t* cr_mcu_row_ptr = nullptr; - - JSAMPROW y[kCompressBatchSize]; - JSAMPROW cb[kCompressBatchSize / 2]; - JSAMPROW cr[kCompressBatchSize / 2]; - JSAMPARRAY planes[3]{y, cb, cr}; - - if (cinfo->image_height % kCompressBatchSize != 0) { - empty = std::make_unique(aligned_width); - memset(empty.get(), 0, aligned_width); - } - - if (need_luma_padding) { - size_t mcu_row_size = aligned_width * kCompressBatchSize; - y_mcu_row = std::make_unique(mcu_row_size); - y_mcu_row_ptr = y_mcu_row.get(); - uint8_t* tmp = y_mcu_row_ptr; - for (int i = 0; i < kCompressBatchSize; ++i, tmp += aligned_width) { - memset(tmp + cinfo->image_width, 0, aligned_width - cinfo->image_width); - } - } - - if (need_chroma_padding) { - size_t mcu_row_size = aligned_chroma_width * kCompressBatchSize / 2; - cb_mcu_row = std::make_unique(mcu_row_size); - cb_mcu_row_ptr = cb_mcu_row.get(); - cr_mcu_row = std::make_unique(mcu_row_size); - cr_mcu_row_ptr = cr_mcu_row.get(); - uint8_t* tmp1 = cb_mcu_row_ptr; - uint8_t* tmp2 = cr_mcu_row_ptr; - for (int i = 0; i < kCompressBatchSize / 2; - ++i, tmp1 += aligned_chroma_width, tmp2 += aligned_chroma_width) { - memset(tmp1 + cinfo->image_width / 2, 0, aligned_chroma_width - (cinfo->image_width / 2)); - memset(tmp2 + cinfo->image_width / 2, 0, aligned_chroma_width - (cinfo->image_width / 2)); - } - } - - while (cinfo->next_scanline < cinfo->image_height) { - for (int i = 0; i < kCompressBatchSize; ++i) { - size_t scanline = cinfo->next_scanline + i; - if (scanline < cinfo->image_height) { - y[i] = y_plane + scanline * lumaStride; - if (need_luma_padding) { - uint8_t* tmp = y_mcu_row_ptr + i * aligned_width; - memcpy(tmp, y[i], cinfo->image_width); - y[i] = tmp; - } - } else { - y[i] = empty.get(); - } - } - // cb, cr only have half scanlines - for (int i = 0; i < kCompressBatchSize / 2; ++i) { - size_t scanline = cinfo->next_scanline / 2 + i; - if (scanline < cinfo->image_height / 2) { - int offset = scanline * chromaStride; - cb[i] = u_plane + offset; - cr[i] = v_plane + offset; - if (need_chroma_padding) { - uint8_t* tmp = cb_mcu_row_ptr + i * aligned_chroma_width; - 
memcpy(tmp, cb[i], cinfo->image_width / 2); - cb[i] = tmp; - tmp = cr_mcu_row_ptr + i * aligned_chroma_width; - memcpy(tmp, cr[i], cinfo->image_width / 2); - cr[i] = tmp; - } - } else { - cb[i] = cr[i] = empty.get(); - } - } - int processed = jpeg_write_raw_data(cinfo, planes, kCompressBatchSize); - if (processed != kCompressBatchSize) { - ALOGE("Number of processed lines does not equal input lines."); - return false; - } - } - return true; -} - -bool JpegEncoderHelper::compressY(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, - int lumaStride) { - uint8_t* y_plane = const_cast(yBuffer); - - const int aligned_luma_width = ALIGNM(cinfo->image_width, kCompressBatchSize); - const bool need_luma_padding = (lumaStride < aligned_luma_width); - - std::unique_ptr empty = nullptr; - std::unique_ptr y_mcu_row = nullptr; - uint8_t* y_mcu_row_ptr = nullptr; - - JSAMPROW y[kCompressBatchSize]; - JSAMPARRAY planes[1]{y}; - - if (cinfo->image_height % kCompressBatchSize != 0) { - empty = std::make_unique(aligned_luma_width); - memset(empty.get(), 0, aligned_luma_width); - } - - if (need_luma_padding) { - size_t mcu_row_size = aligned_luma_width * kCompressBatchSize; - y_mcu_row = std::make_unique(mcu_row_size); - y_mcu_row_ptr = y_mcu_row.get(); - uint8_t* tmp = y_mcu_row_ptr; - for (int i = 0; i < kCompressBatchSize; ++i, tmp += aligned_luma_width) { - memset(tmp + cinfo->image_width, 0, aligned_luma_width - cinfo->image_width); - } - } - - while (cinfo->next_scanline < cinfo->image_height) { - for (int i = 0; i < kCompressBatchSize; ++i) { - size_t scanline = cinfo->next_scanline + i; - if (scanline < cinfo->image_height) { - y[i] = y_plane + scanline * lumaStride; - if (need_luma_padding) { - uint8_t* tmp = y_mcu_row_ptr + i * aligned_luma_width; - memcpy(tmp, y[i], cinfo->image_width); - y[i] = tmp; - } - } else { - y[i] = empty.get(); - } - } - int processed = jpeg_write_raw_data(cinfo, planes, kCompressBatchSize); - if (processed != kCompressBatchSize / 2) { - ALOGE("Number of processed lines does not equal input lines."); - return false; - } - } - return true; -} - -} // namespace ultrahdr diff --git a/lib/jpegencoderhelper.h b/lib/jpegencoderhelper.h deleted file mode 100644 index e988578..0000000 --- a/lib/jpegencoderhelper.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ULTRAHDR_JPEGENCODERHELPER_H -#define ULTRAHDR_JPEGENCODERHELPER_H - -#include // For jpeglib.h. - -// C++ build requires extern C for jpeg internals. -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -#ifdef __cplusplus -} // extern "C" -#endif - -#include -#include - -namespace ultrahdr { - -/* - * Encapsulates a converter from raw image (YUV420planer or grey-scale) to JPEG format. - * This class is not thread-safe. - */ -class JpegEncoderHelper { - public: - JpegEncoderHelper(); - ~JpegEncoderHelper(); - - /* - * Compresses YUV420Planer image to JPEG format. 
After calling this method, call - * getCompressedImagePtr() to get the image. |quality| is the jpeg image quality parameter to use. - * It ranges from 1 (poorest quality) to 100 (highest quality). |iccBuffer| is the buffer of the - * ICC segment, which will be added to the compressed image. - * Returns false if errors occur during compression. - */ - bool compressImage(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, int height, - int lumaStride, int chromaStride, int quality, const void* iccBuffer, - unsigned int iccSize); - - /* - * Returns the compressed JPEG buffer pointer. This method must be called only after calling - * compressImage(). - */ - void* getCompressedImagePtr(); - - /* - * Returns the compressed JPEG buffer size. This method must be called only after calling - * compressImage(). - */ - size_t getCompressedImageSize(); - - /* - * Process 16 lines of Y and 16 lines of U/V each time. - * We must pass at least 16 scanlines according to libjpeg documentation. - */ - static const int kCompressBatchSize = 16; - - private: - // initDestination(), emptyOutputBuffer() and terminateDestination() are callback functions to be - // passed into jpeg library. - static void initDestination(j_compress_ptr cinfo); - static boolean emptyOutputBuffer(j_compress_ptr cinfo); - static void terminateDestination(j_compress_ptr cinfo); - static void outputErrorMessage(j_common_ptr cinfo); - - // Returns false if errors occur. - bool encode(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, int height, - int lumaStride, int chromaStride, int quality, const void* iccBuffer, - unsigned int iccSize); - void setJpegDestination(jpeg_compress_struct* cinfo); - void setJpegCompressStruct(int width, int height, int quality, jpeg_compress_struct* cinfo, - bool isSingleChannel); - // Returns false if errors occur. - bool compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, const uint8_t* uvBuffer, - int lumaStride, int chromaStride); - bool compressY(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, int lumaStride); - - // The block size for encoded jpeg image buffer. - static const int kBlockSize = 16384; - - // The buffer that holds the compressed result. - std::vector mResultBuffer; -}; - -} /* namespace ultrahdr */ - -#endif // ULTRAHDR_JPEGENCODERHELPER_H diff --git a/lib/jpegr.cpp b/lib/jpegr.cpp deleted file mode 100644 index 015ffce..0000000 --- a/lib/jpegr.cpp +++ /dev/null @@ -1,1509 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifdef _WIN32 -#include -#include -#else -#include -#endif - -#include -#include -#include -#include -#include - -#include "ultrahdrcommon.h" -#include "jpegr.h" -#include "icc.h" -#include "multipictureformat.h" - -#include "image_io/base/data_segment_data_source.h" -#include "image_io/jpeg/jpeg_info.h" -#include "image_io/jpeg/jpeg_info_builder.h" -#include "image_io/jpeg/jpeg_marker.h" -#include "image_io/jpeg/jpeg_scanner.h" - -using namespace std; -using namespace photos_editing_formats::image_io; - -namespace ultrahdr { - -#define USE_SRGB_INVOETF_LUT 1 -#define USE_HLG_OETF_LUT 1 -#define USE_PQ_OETF_LUT 1 -#define USE_HLG_INVOETF_LUT 1 -#define USE_PQ_INVOETF_LUT 1 -#define USE_APPLY_GAIN_LUT 1 - -#define JPEGR_CHECK(x) \ - { \ - status_t status = (x); \ - if ((status) != JPEGR_NO_ERROR) { \ - return status; \ - } \ - } - -// JPEG compress quality (0 ~ 100) for gain map -static const int kMapCompressQuality = 85; - -int GetCPUCoreCount() { - int cpuCoreCount = 1; - -#if defined(_WIN32) - SYSTEM_INFO system_info; - ZeroMemory(&system_info, sizeof(system_info)); - GetSystemInfo(&system_info); - cpuCoreCount = (size_t)system_info.dwNumberOfProcessors; -#elif defined(_SC_NPROCESSORS_ONLN) - cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN); -#elif defined(_SC_NPROCESSORS_CONF) - cpuCoreCount = sysconf(_SC_NPROCESSORS_CONF); -#else -#error platform-specific implementation for GetCPUCoreCount() missing. -#endif - if (cpuCoreCount <= 0) cpuCoreCount = 1; - return cpuCoreCount; -} - -/* - * MessageWriter implementation for ALOG functions. - */ -class AlogMessageWriter : public MessageWriter { - public: - void WriteMessage(const Message& message) override { - std::string log = GetFormattedMessage(message); - ALOGD("%s", log.c_str()); - } -}; - -/* - * Helper function copies the JPEG image from without EXIF. - * - * @param pDest destination of the data to be written. - * @param pSource source of data being written. - * @param exif_pos position of the EXIF package, which is aligned with jpegdecoder.getEXIFPos(). - * (4 bytes offset to FF sign, the byte after FF E1 XX XX ). - * @param exif_size exif size without the initial 4 bytes, aligned with jpegdecoder.getEXIFSize(). 
- */ -static void copyJpegWithoutExif(jr_compressed_ptr pDest, jr_compressed_ptr pSource, size_t exif_pos, - size_t exif_size) { - const size_t exif_offset = 4; // exif_pos has 4 bytes offset to the FF sign - pDest->length = pSource->length - exif_size - exif_offset; - pDest->data = new uint8_t[pDest->length]; - pDest->maxLength = pDest->length; - pDest->colorGamut = pSource->colorGamut; - memcpy(pDest->data, pSource->data, exif_pos - exif_offset); - memcpy((uint8_t*)pDest->data + exif_pos - exif_offset, - (uint8_t*)pSource->data + exif_pos + exif_size, pSource->length - exif_pos - exif_size); -} - -status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, - jr_uncompressed_ptr yuv420_image_ptr, - ultrahdr_transfer_function hdr_tf, - jr_compressed_ptr dest_ptr) { - if (p010_image_ptr == nullptr || p010_image_ptr->data == nullptr) { - ALOGE("Received nullptr for input p010 image"); - return ERROR_JPEGR_BAD_PTR; - } - if (p010_image_ptr->width % 2 != 0 || p010_image_ptr->height % 2 != 0) { - ALOGE("Image dimensions cannot be odd, image dimensions %zux%zu", p010_image_ptr->width, - p010_image_ptr->height); - return ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT; - } - if (p010_image_ptr->width < kMinWidth || p010_image_ptr->height < kMinHeight) { - ALOGE("Image dimensions cannot be less than %dx%d, image dimensions %zux%zu", kMinWidth, - kMinHeight, p010_image_ptr->width, p010_image_ptr->height); - return ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT; - } - if (p010_image_ptr->width > kMaxWidth || p010_image_ptr->height > kMaxHeight) { - ALOGE("Image dimensions cannot be larger than %dx%d, image dimensions %zux%zu", kMaxWidth, - kMaxHeight, p010_image_ptr->width, p010_image_ptr->height); - return ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT; - } - if (p010_image_ptr->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED || - p010_image_ptr->colorGamut > ULTRAHDR_COLORGAMUT_MAX) { - ALOGE("Unrecognized p010 color gamut %d", p010_image_ptr->colorGamut); - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - if (p010_image_ptr->luma_stride != 0 && p010_image_ptr->luma_stride < p010_image_ptr->width) { - ALOGE("Luma stride must not be smaller than width, stride=%zu, width=%zu", - p010_image_ptr->luma_stride, p010_image_ptr->width); - return ERROR_JPEGR_INVALID_STRIDE; - } - if (p010_image_ptr->chroma_data != nullptr && - p010_image_ptr->chroma_stride < p010_image_ptr->width) { - ALOGE("Chroma stride must not be smaller than width, stride=%zu, width=%zu", - p010_image_ptr->chroma_stride, p010_image_ptr->width); - return ERROR_JPEGR_INVALID_STRIDE; - } - if (dest_ptr == nullptr || dest_ptr->data == nullptr) { - ALOGE("Received nullptr for destination"); - return ERROR_JPEGR_BAD_PTR; - } - if (hdr_tf <= ULTRAHDR_TF_UNSPECIFIED || hdr_tf > ULTRAHDR_TF_MAX || hdr_tf == ULTRAHDR_TF_SRGB) { - ALOGE("Invalid hdr transfer function %d", hdr_tf); - return ERROR_JPEGR_INVALID_TRANS_FUNC; - } - if (yuv420_image_ptr == nullptr) { - return JPEGR_NO_ERROR; - } - if (yuv420_image_ptr->data == nullptr) { - ALOGE("Received nullptr for uncompressed 420 image"); - return ERROR_JPEGR_BAD_PTR; - } - if (yuv420_image_ptr->luma_stride != 0 && - yuv420_image_ptr->luma_stride < yuv420_image_ptr->width) { - ALOGE("Luma stride must not be smaller than width, stride=%zu, width=%zu", - yuv420_image_ptr->luma_stride, yuv420_image_ptr->width); - return ERROR_JPEGR_INVALID_STRIDE; - } - if (yuv420_image_ptr->chroma_data != nullptr && - yuv420_image_ptr->chroma_stride < yuv420_image_ptr->width / 2) { - ALOGE("Chroma stride must not be smaller than (width / 
2), stride=%zu, width=%zu", - yuv420_image_ptr->chroma_stride, yuv420_image_ptr->width); - return ERROR_JPEGR_INVALID_STRIDE; - } - if (p010_image_ptr->width != yuv420_image_ptr->width || - p010_image_ptr->height != yuv420_image_ptr->height) { - ALOGE("Image resolutions mismatch: P010: %zux%zu, YUV420: %zux%zu", p010_image_ptr->width, - p010_image_ptr->height, yuv420_image_ptr->width, yuv420_image_ptr->height); - return ERROR_JPEGR_RESOLUTION_MISMATCH; - } - if (yuv420_image_ptr->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED || - yuv420_image_ptr->colorGamut > ULTRAHDR_COLORGAMUT_MAX) { - ALOGE("Unrecognized 420 color gamut %d", yuv420_image_ptr->colorGamut); - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - return JPEGR_NO_ERROR; -} - -status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, - jr_uncompressed_ptr yuv420_image_ptr, - ultrahdr_transfer_function hdr_tf, - jr_compressed_ptr dest_ptr, int quality) { - if (quality < 0 || quality > 100) { - ALOGE("quality factor is out side range [0-100], quality factor : %d", quality); - return ERROR_JPEGR_INVALID_QUALITY_FACTOR; - } - return areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest_ptr); -} - -/* Encode API-0 */ -status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, ultrahdr_transfer_function hdr_tf, - jr_compressed_ptr dest, int quality, jr_exif_ptr exif) { - // validate input arguments - JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, nullptr, hdr_tf, dest, quality)); - if (exif != nullptr && exif->data == nullptr) { - ALOGE("received nullptr for exif metadata"); - return ERROR_JPEGR_BAD_PTR; - } - - // clean up input structure for later usage - jpegr_uncompressed_struct p010_image = *p010_image_ptr; - if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; - if (!p010_image.chroma_data) { - uint16_t* data = reinterpret_cast(p010_image.data); - p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; - p010_image.chroma_stride = p010_image.luma_stride; - } - - const size_t yu420_luma_stride = ALIGNM(p010_image.width, JpegEncoderHelper::kCompressBatchSize); - unique_ptr yuv420_image_data = - make_unique(yu420_luma_stride * p010_image.height * 3 / 2); - jpegr_uncompressed_struct yuv420_image; - yuv420_image.data = yuv420_image_data.get(); - yuv420_image.width = p010_image.width; - yuv420_image.height = p010_image.height; - yuv420_image.colorGamut = p010_image.colorGamut; - yuv420_image.chroma_data = nullptr; - yuv420_image.luma_stride = yu420_luma_stride; - yuv420_image.chroma_stride = yu420_luma_stride >> 1; - uint8_t* data = reinterpret_cast(yuv420_image.data); - yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height; - - // tone map - JPEGR_CHECK(toneMap(&p010_image, &yuv420_image)); - - // gain map - ultrahdr_metadata_struct metadata; - metadata.version = kJpegrVersion; - jpegr_uncompressed_struct gainmap_image; - JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image)); - std::unique_ptr map_data; - map_data.reset(reinterpret_cast(gainmap_image.data)); - - // compress gain map - JpegEncoderHelper jpeg_enc_obj_gm; - JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); - jpegr_compressed_struct compressed_map; - compressed_map.data = jpeg_enc_obj_gm.getCompressedImagePtr(); - compressed_map.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - compressed_map.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - compressed_map.colorGamut = 
ULTRAHDR_COLORGAMUT_UNSPECIFIED; - - std::shared_ptr icc = - IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420_image.colorGamut); - - // convert to Bt601 YUV encoding for JPEG encode - if (yuv420_image.colorGamut != ULTRAHDR_COLORGAMUT_P3) { - JPEGR_CHECK(convertYuv(&yuv420_image, yuv420_image.colorGamut, ULTRAHDR_COLORGAMUT_P3)); - } - - // compress 420 image - JpegEncoderHelper jpeg_enc_obj_yuv420; - if (!jpeg_enc_obj_yuv420.compressImage(reinterpret_cast(yuv420_image.data), - reinterpret_cast(yuv420_image.chroma_data), - yuv420_image.width, yuv420_image.height, - yuv420_image.luma_stride, yuv420_image.chroma_stride, - quality, icc->getData(), icc->getLength())) { - return ERROR_JPEGR_ENCODE_ERROR; - } - jpegr_compressed_struct jpeg; - jpeg.data = jpeg_enc_obj_yuv420.getCompressedImagePtr(); - jpeg.length = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); - jpeg.maxLength = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); - jpeg.colorGamut = yuv420_image.colorGamut; - - // append gain map, no ICC since JPEG encode already did it - JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0, - &metadata, dest)); - - return JPEGR_NO_ERROR; -} - -/* Encode API-1 */ -status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, - jr_uncompressed_ptr yuv420_image_ptr, ultrahdr_transfer_function hdr_tf, - jr_compressed_ptr dest, int quality, jr_exif_ptr exif) { - // validate input arguments - if (yuv420_image_ptr == nullptr) { - ALOGE("received nullptr for uncompressed 420 image"); - return ERROR_JPEGR_BAD_PTR; - } - if (exif != nullptr && exif->data == nullptr) { - ALOGE("received nullptr for exif metadata"); - return ERROR_JPEGR_BAD_PTR; - } - JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest, quality)) - - // clean up input structure for later usage - jpegr_uncompressed_struct p010_image = *p010_image_ptr; - if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; - if (!p010_image.chroma_data) { - uint16_t* data = reinterpret_cast(p010_image.data); - p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; - p010_image.chroma_stride = p010_image.luma_stride; - } - jpegr_uncompressed_struct yuv420_image = *yuv420_image_ptr; - if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width; - if (!yuv420_image.chroma_data) { - uint8_t* data = reinterpret_cast(yuv420_image.data); - yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height; - yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1; - } - - // gain map - ultrahdr_metadata_struct metadata; - metadata.version = kJpegrVersion; - jpegr_uncompressed_struct gainmap_image; - JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image)); - std::unique_ptr map_data; - map_data.reset(reinterpret_cast(gainmap_image.data)); - - // compress gain map - JpegEncoderHelper jpeg_enc_obj_gm; - JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); - jpegr_compressed_struct compressed_map; - compressed_map.data = jpeg_enc_obj_gm.getCompressedImagePtr(); - compressed_map.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - compressed_map.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; - - std::shared_ptr icc = - IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420_image.colorGamut); - - jpegr_uncompressed_struct yuv420_bt601_image = yuv420_image; - 
unique_ptr yuv_420_bt601_data; - // Convert to bt601 YUV encoding for JPEG encode - if (yuv420_image.colorGamut != ULTRAHDR_COLORGAMUT_P3) { - const size_t yuv_420_bt601_luma_stride = - ALIGNM(yuv420_image.width, JpegEncoderHelper::kCompressBatchSize); - yuv_420_bt601_data = - make_unique(yuv_420_bt601_luma_stride * yuv420_image.height * 3 / 2); - yuv420_bt601_image.data = yuv_420_bt601_data.get(); - yuv420_bt601_image.colorGamut = yuv420_image.colorGamut; - yuv420_bt601_image.luma_stride = yuv_420_bt601_luma_stride; - uint8_t* data = reinterpret_cast(yuv420_bt601_image.data); - yuv420_bt601_image.chroma_data = data + yuv_420_bt601_luma_stride * yuv420_image.height; - yuv420_bt601_image.chroma_stride = yuv_420_bt601_luma_stride >> 1; - - { - // copy luma - uint8_t* y_dst = reinterpret_cast(yuv420_bt601_image.data); - uint8_t* y_src = reinterpret_cast(yuv420_image.data); - if (yuv420_bt601_image.luma_stride == yuv420_image.luma_stride) { - memcpy(y_dst, y_src, yuv420_bt601_image.luma_stride * yuv420_image.height); - } else { - for (size_t i = 0; i < yuv420_image.height; i++) { - memcpy(y_dst, y_src, yuv420_image.width); - if (yuv420_image.width != yuv420_bt601_image.luma_stride) { - memset(y_dst + yuv420_image.width, 0, - yuv420_bt601_image.luma_stride - yuv420_image.width); - } - y_dst += yuv420_bt601_image.luma_stride; - y_src += yuv420_image.luma_stride; - } - } - } - - if (yuv420_bt601_image.chroma_stride == yuv420_image.chroma_stride) { - // copy luma - uint8_t* ch_dst = reinterpret_cast(yuv420_bt601_image.chroma_data); - uint8_t* ch_src = reinterpret_cast(yuv420_image.chroma_data); - memcpy(ch_dst, ch_src, yuv420_bt601_image.chroma_stride * yuv420_image.height); - } else { - // copy cb & cr - uint8_t* cb_dst = reinterpret_cast(yuv420_bt601_image.chroma_data); - uint8_t* cb_src = reinterpret_cast(yuv420_image.chroma_data); - uint8_t* cr_dst = cb_dst + (yuv420_bt601_image.chroma_stride * yuv420_bt601_image.height / 2); - uint8_t* cr_src = cb_src + (yuv420_image.chroma_stride * yuv420_image.height / 2); - for (size_t i = 0; i < yuv420_image.height / 2; i++) { - memcpy(cb_dst, cb_src, yuv420_image.width / 2); - memcpy(cr_dst, cr_src, yuv420_image.width / 2); - if (yuv420_bt601_image.width / 2 != yuv420_bt601_image.chroma_stride) { - memset(cb_dst + yuv420_image.width / 2, 0, - yuv420_bt601_image.chroma_stride - yuv420_image.width / 2); - memset(cr_dst + yuv420_image.width / 2, 0, - yuv420_bt601_image.chroma_stride - yuv420_image.width / 2); - } - cb_dst += yuv420_bt601_image.chroma_stride; - cb_src += yuv420_image.chroma_stride; - cr_dst += yuv420_bt601_image.chroma_stride; - cr_src += yuv420_image.chroma_stride; - } - } - JPEGR_CHECK(convertYuv(&yuv420_bt601_image, yuv420_image.colorGamut, ULTRAHDR_COLORGAMUT_P3)); - } - - // compress 420 image - JpegEncoderHelper jpeg_enc_obj_yuv420; - if (!jpeg_enc_obj_yuv420.compressImage( - reinterpret_cast(yuv420_bt601_image.data), - reinterpret_cast(yuv420_bt601_image.chroma_data), yuv420_bt601_image.width, - yuv420_bt601_image.height, yuv420_bt601_image.luma_stride, - yuv420_bt601_image.chroma_stride, quality, icc->getData(), icc->getLength())) { - return ERROR_JPEGR_ENCODE_ERROR; - } - - jpegr_compressed_struct jpeg; - jpeg.data = jpeg_enc_obj_yuv420.getCompressedImagePtr(); - jpeg.length = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); - jpeg.maxLength = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); - jpeg.colorGamut = yuv420_image.colorGamut; - - // append gain map, no ICC since JPEG encode already did it - 
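- // appendGainMap() below assembles the final JPEG/R container: the primary JPEG with its EXIF (if any), XMP and MPF markers, followed by the compressed gain map as the secondary image.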
JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0, - &metadata, dest)); - return JPEGR_NO_ERROR; -} - -/* Encode API-2 */ -status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, - jr_uncompressed_ptr yuv420_image_ptr, - jr_compressed_ptr yuv420jpg_image_ptr, - ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest) { - // validate input arguments - if (yuv420_image_ptr == nullptr) { - ALOGE("received nullptr for uncompressed 420 image"); - return ERROR_JPEGR_BAD_PTR; - } - if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) { - ALOGE("received nullptr for compressed jpeg image"); - return ERROR_JPEGR_BAD_PTR; - } - JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest)) - - // clean up input structure for later usage - jpegr_uncompressed_struct p010_image = *p010_image_ptr; - if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; - if (!p010_image.chroma_data) { - uint16_t* data = reinterpret_cast(p010_image.data); - p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; - p010_image.chroma_stride = p010_image.luma_stride; - } - jpegr_uncompressed_struct yuv420_image = *yuv420_image_ptr; - if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width; - if (!yuv420_image.chroma_data) { - uint8_t* data = reinterpret_cast(yuv420_image.data); - yuv420_image.chroma_data = data + yuv420_image.luma_stride * p010_image.height; - yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1; - } - - // gain map - ultrahdr_metadata_struct metadata; - metadata.version = kJpegrVersion; - jpegr_uncompressed_struct gainmap_image; - JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image)); - std::unique_ptr map_data; - map_data.reset(reinterpret_cast(gainmap_image.data)); - - // compress gain map - JpegEncoderHelper jpeg_enc_obj_gm; - JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); - jpegr_compressed_struct gainmapjpg_image; - gainmapjpg_image.data = jpeg_enc_obj_gm.getCompressedImagePtr(); - gainmapjpg_image.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - gainmapjpg_image.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - gainmapjpg_image.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; - - return encodeJPEGR(yuv420jpg_image_ptr, &gainmapjpg_image, &metadata, dest); -} - -/* Encode API-3 */ -status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, - jr_compressed_ptr yuv420jpg_image_ptr, - ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest) { - // validate input arguments - if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) { - ALOGE("received nullptr for compressed jpeg image"); - return ERROR_JPEGR_BAD_PTR; - } - JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, nullptr, hdr_tf, dest)) - - // clean up input structure for later usage - jpegr_uncompressed_struct p010_image = *p010_image_ptr; - if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; - if (!p010_image.chroma_data) { - uint16_t* data = reinterpret_cast(p010_image.data); - p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; - p010_image.chroma_stride = p010_image.luma_stride; - } - - // decode input jpeg, gamut is going to be bt601. 
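- // The decoded planes become the SDR reference for gain map computation, so generateGainMap() is invoked further below with sdr_is_601 set.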
- JpegDecoderHelper jpeg_dec_obj_yuv420; - if (!jpeg_dec_obj_yuv420.decompressImage(yuv420jpg_image_ptr->data, - yuv420jpg_image_ptr->length)) { - return ERROR_JPEGR_DECODE_ERROR; - } - jpegr_uncompressed_struct yuv420_image{}; - yuv420_image.data = jpeg_dec_obj_yuv420.getDecompressedImagePtr(); - yuv420_image.width = jpeg_dec_obj_yuv420.getDecompressedImageWidth(); - yuv420_image.height = jpeg_dec_obj_yuv420.getDecompressedImageHeight(); - yuv420_image.colorGamut = yuv420jpg_image_ptr->colorGamut; - if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width; - if (!yuv420_image.chroma_data) { - uint8_t* data = reinterpret_cast(yuv420_image.data); - yuv420_image.chroma_data = data + yuv420_image.luma_stride * p010_image.height; - yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1; - } - - if (p010_image_ptr->width != yuv420_image.width || - p010_image_ptr->height != yuv420_image.height) { - return ERROR_JPEGR_RESOLUTION_MISMATCH; - } - - // gain map - ultrahdr_metadata_struct metadata; - metadata.version = kJpegrVersion; - jpegr_uncompressed_struct gainmap_image; - JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image, - true /* sdr_is_601 */)); - std::unique_ptr map_data; - map_data.reset(reinterpret_cast(gainmap_image.data)); - - // compress gain map - JpegEncoderHelper jpeg_enc_obj_gm; - JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); - jpegr_compressed_struct gainmapjpg_image; - gainmapjpg_image.data = jpeg_enc_obj_gm.getCompressedImagePtr(); - gainmapjpg_image.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - gainmapjpg_image.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); - gainmapjpg_image.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; - - return encodeJPEGR(yuv420jpg_image_ptr, &gainmapjpg_image, &metadata, dest); -} - -/* Encode API-4 */ -status_t JpegR::encodeJPEGR(jr_compressed_ptr yuv420jpg_image_ptr, - jr_compressed_ptr gainmapjpg_image_ptr, ultrahdr_metadata_ptr metadata, - jr_compressed_ptr dest) { - if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) { - ALOGE("received nullptr for compressed jpeg image"); - return ERROR_JPEGR_BAD_PTR; - } - if (gainmapjpg_image_ptr == nullptr || gainmapjpg_image_ptr->data == nullptr) { - ALOGE("received nullptr for compressed gain map"); - return ERROR_JPEGR_BAD_PTR; - } - if (dest == nullptr || dest->data == nullptr) { - ALOGE("received nullptr for destination"); - return ERROR_JPEGR_BAD_PTR; - } - - // We just want to check if ICC is present, so don't do a full decode. Note, - // this doesn't verify that the ICC is valid. - JpegDecoderHelper decoder; - std::vector icc; - decoder.getCompressedImageParameters(yuv420jpg_image_ptr->data, yuv420jpg_image_ptr->length, - /* pWidth */ nullptr, /* pHeight */ nullptr, &icc, - /* exifData */ nullptr); - - // Add ICC if not already present. 
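- // If the primary JPEG already carries an ICC profile it is kept as-is; otherwise an sRGB profile matching the image's color gamut is generated and embedded while appending the gain map.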
- if (icc.size() > 0) { - JPEGR_CHECK(appendGainMap(yuv420jpg_image_ptr, gainmapjpg_image_ptr, /* exif */ nullptr, - /* icc */ nullptr, /* icc size */ 0, metadata, dest)); - } else { - std::shared_ptr newIcc = - IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420jpg_image_ptr->colorGamut); - JPEGR_CHECK(appendGainMap(yuv420jpg_image_ptr, gainmapjpg_image_ptr, /* exif */ nullptr, - newIcc->getData(), newIcc->getLength(), metadata, dest)); - } - - return JPEGR_NO_ERROR; -} - -status_t JpegR::getJPEGRInfo(jr_compressed_ptr jpegr_image_ptr, jr_info_ptr jpeg_image_info_ptr) { - if (jpegr_image_ptr == nullptr || jpegr_image_ptr->data == nullptr) { - ALOGE("received nullptr for compressed jpegr image"); - return ERROR_JPEGR_BAD_PTR; - } - if (jpeg_image_info_ptr == nullptr) { - ALOGE("received nullptr for compressed jpegr info struct"); - return ERROR_JPEGR_BAD_PTR; - } - - jpegr_compressed_struct primary_image, gainmap_image; - status_t status = extractPrimaryImageAndGainMap(jpegr_image_ptr, &primary_image, &gainmap_image); - if (status != JPEGR_NO_ERROR && status != ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND) { - return status; - } - - JpegDecoderHelper jpeg_dec_obj_hdr; - if (!jpeg_dec_obj_hdr.getCompressedImageParameters( - primary_image.data, primary_image.length, &jpeg_image_info_ptr->width, - &jpeg_image_info_ptr->height, jpeg_image_info_ptr->iccData, - jpeg_image_info_ptr->exifData)) { - return ERROR_JPEGR_DECODE_ERROR; - } - - return status; -} - -/* Decode API */ -status_t JpegR::decodeJPEGR(jr_compressed_ptr jpegr_image_ptr, jr_uncompressed_ptr dest, - float max_display_boost, jr_exif_ptr exif, - ultrahdr_output_format output_format, - jr_uncompressed_ptr gainmap_image_ptr, ultrahdr_metadata_ptr metadata) { - if (jpegr_image_ptr == nullptr || jpegr_image_ptr->data == nullptr) { - ALOGE("received nullptr for compressed jpegr image"); - return ERROR_JPEGR_BAD_PTR; - } - if (dest == nullptr || dest->data == nullptr) { - ALOGE("received nullptr for dest image"); - return ERROR_JPEGR_BAD_PTR; - } - if (max_display_boost < 1.0f) { - ALOGE("received bad value for max_display_boost %f", max_display_boost); - return ERROR_JPEGR_INVALID_DISPLAY_BOOST; - } - if (exif != nullptr && exif->data == nullptr) { - ALOGE("received nullptr address for exif data"); - return ERROR_JPEGR_BAD_PTR; - } - if (output_format <= ULTRAHDR_OUTPUT_UNSPECIFIED || output_format > ULTRAHDR_OUTPUT_MAX) { - ALOGE("received bad value for output format %d", output_format); - return ERROR_JPEGR_INVALID_OUTPUT_FORMAT; - } - - jpegr_compressed_struct primary_jpeg_image, gainmap_jpeg_image; - status_t status = - extractPrimaryImageAndGainMap(jpegr_image_ptr, &primary_jpeg_image, &gainmap_jpeg_image); - if (status != JPEGR_NO_ERROR) { - if (output_format != ULTRAHDR_OUTPUT_SDR || status != ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND) { - ALOGE("received invalid compressed jpegr image"); - return status; - } - } - - JpegDecoderHelper jpeg_dec_obj_yuv420; - if (!jpeg_dec_obj_yuv420.decompressImage(primary_jpeg_image.data, primary_jpeg_image.length, - (output_format == ULTRAHDR_OUTPUT_SDR))) { - return ERROR_JPEGR_DECODE_ERROR; - } - - if (output_format == ULTRAHDR_OUTPUT_SDR) { - if ((jpeg_dec_obj_yuv420.getDecompressedImageWidth() * - jpeg_dec_obj_yuv420.getDecompressedImageHeight() * 4) > - jpeg_dec_obj_yuv420.getDecompressedImageSize()) { - return ERROR_JPEGR_DECODE_ERROR; - } - } else { - if ((jpeg_dec_obj_yuv420.getDecompressedImageWidth() * - jpeg_dec_obj_yuv420.getDecompressedImageHeight() * 3 / 2) > - 
jpeg_dec_obj_yuv420.getDecompressedImageSize()) { - return ERROR_JPEGR_DECODE_ERROR; - } - } - - if (exif != nullptr) { - if (exif->data == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - if (exif->length < jpeg_dec_obj_yuv420.getEXIFSize()) { - return ERROR_JPEGR_BUFFER_TOO_SMALL; - } - memcpy(exif->data, jpeg_dec_obj_yuv420.getEXIFPtr(), jpeg_dec_obj_yuv420.getEXIFSize()); - exif->length = jpeg_dec_obj_yuv420.getEXIFSize(); - } - - if (output_format == ULTRAHDR_OUTPUT_SDR) { - dest->width = jpeg_dec_obj_yuv420.getDecompressedImageWidth(); - dest->height = jpeg_dec_obj_yuv420.getDecompressedImageHeight(); - memcpy(dest->data, jpeg_dec_obj_yuv420.getDecompressedImagePtr(), - dest->width * dest->height * 4); - return JPEGR_NO_ERROR; - } - - JpegDecoderHelper jpeg_dec_obj_gm; - if (!jpeg_dec_obj_gm.decompressImage(gainmap_jpeg_image.data, gainmap_jpeg_image.length)) { - return ERROR_JPEGR_DECODE_ERROR; - } - if ((jpeg_dec_obj_gm.getDecompressedImageWidth() * jpeg_dec_obj_gm.getDecompressedImageHeight()) > - jpeg_dec_obj_gm.getDecompressedImageSize()) { - return ERROR_JPEGR_DECODE_ERROR; - } - - jpegr_uncompressed_struct gainmap_image; - gainmap_image.data = jpeg_dec_obj_gm.getDecompressedImagePtr(); - gainmap_image.width = jpeg_dec_obj_gm.getDecompressedImageWidth(); - gainmap_image.height = jpeg_dec_obj_gm.getDecompressedImageHeight(); - - if (gainmap_image_ptr != nullptr) { - gainmap_image_ptr->width = gainmap_image.width; - gainmap_image_ptr->height = gainmap_image.height; - int size = gainmap_image_ptr->width * gainmap_image_ptr->height; - gainmap_image_ptr->data = malloc(size); - memcpy(gainmap_image_ptr->data, gainmap_image.data, size); - } - - ultrahdr_metadata_struct uhdr_metadata; - if (!getMetadataFromXMP(static_cast(jpeg_dec_obj_gm.getXMPPtr()), - jpeg_dec_obj_gm.getXMPSize(), &uhdr_metadata)) { - return ERROR_JPEGR_METADATA_ERROR; - } - - if (metadata != nullptr) { - metadata->version = uhdr_metadata.version; - metadata->minContentBoost = uhdr_metadata.minContentBoost; - metadata->maxContentBoost = uhdr_metadata.maxContentBoost; - metadata->gamma = uhdr_metadata.gamma; - metadata->offsetSdr = uhdr_metadata.offsetSdr; - metadata->offsetHdr = uhdr_metadata.offsetHdr; - metadata->hdrCapacityMin = uhdr_metadata.hdrCapacityMin; - metadata->hdrCapacityMax = uhdr_metadata.hdrCapacityMax; - } - - jpegr_uncompressed_struct yuv420_image; - yuv420_image.data = jpeg_dec_obj_yuv420.getDecompressedImagePtr(); - yuv420_image.width = jpeg_dec_obj_yuv420.getDecompressedImageWidth(); - yuv420_image.height = jpeg_dec_obj_yuv420.getDecompressedImageHeight(); - yuv420_image.colorGamut = IccHelper::readIccColorGamut(jpeg_dec_obj_yuv420.getICCPtr(), - jpeg_dec_obj_yuv420.getICCSize()); - yuv420_image.luma_stride = yuv420_image.width; - uint8_t* data = reinterpret_cast(yuv420_image.data); - yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height; - yuv420_image.chroma_stride = yuv420_image.width >> 1; - - JPEGR_CHECK(applyGainMap(&yuv420_image, &gainmap_image, &uhdr_metadata, output_format, - max_display_boost, dest)); - return JPEGR_NO_ERROR; -} - -status_t JpegR::compressGainMap(jr_uncompressed_ptr gainmap_image_ptr, - JpegEncoderHelper* jpeg_enc_obj_ptr) { - if (gainmap_image_ptr == nullptr || jpeg_enc_obj_ptr == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - - // Don't need to convert YUV to Bt601 since single channel - if (!jpeg_enc_obj_ptr->compressImage(reinterpret_cast(gainmap_image_ptr->data), nullptr, - gainmap_image_ptr->width, gainmap_image_ptr->height, - 
gainmap_image_ptr->luma_stride, 0, kMapCompressQuality, - nullptr, 0)) { - return ERROR_JPEGR_ENCODE_ERROR; - } - - return JPEGR_NO_ERROR; -} - -const int kJobSzInRows = 16; -static_assert(kJobSzInRows > 0 && kJobSzInRows % kMapDimensionScaleFactor == 0, - "align job size to kMapDimensionScaleFactor"); - -class JobQueue { - public: - bool dequeueJob(size_t& rowStart, size_t& rowEnd); - void enqueueJob(size_t rowStart, size_t rowEnd); - void markQueueForEnd(); - void reset(); - - private: - bool mQueuedAllJobs = false; - std::deque> mJobs; - std::mutex mMutex; - std::condition_variable mCv; -}; - -bool JobQueue::dequeueJob(size_t& rowStart, size_t& rowEnd) { - std::unique_lock lock{mMutex}; - while (true) { - if (mJobs.empty()) { - if (mQueuedAllJobs) { - return false; - } else { - mCv.wait_for(lock, std::chrono::milliseconds(100)); - } - } else { - auto it = mJobs.begin(); - rowStart = std::get<0>(*it); - rowEnd = std::get<1>(*it); - mJobs.erase(it); - return true; - } - } - return false; -} - -void JobQueue::enqueueJob(size_t rowStart, size_t rowEnd) { - std::unique_lock lock{mMutex}; - mJobs.push_back(std::make_tuple(rowStart, rowEnd)); - lock.unlock(); - mCv.notify_one(); -} - -void JobQueue::markQueueForEnd() { - std::unique_lock lock{mMutex}; - mQueuedAllJobs = true; - lock.unlock(); - mCv.notify_all(); -} - -void JobQueue::reset() { - std::unique_lock lock{mMutex}; - mJobs.clear(); - mQueuedAllJobs = false; -} - -status_t JpegR::generateGainMap(jr_uncompressed_ptr yuv420_image_ptr, - jr_uncompressed_ptr p010_image_ptr, - ultrahdr_transfer_function hdr_tf, ultrahdr_metadata_ptr metadata, - jr_uncompressed_ptr dest, bool sdr_is_601) { - if (yuv420_image_ptr == nullptr || p010_image_ptr == nullptr || metadata == nullptr || - dest == nullptr || yuv420_image_ptr->data == nullptr || - yuv420_image_ptr->chroma_data == nullptr || p010_image_ptr->data == nullptr || - p010_image_ptr->chroma_data == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - if (yuv420_image_ptr->width != p010_image_ptr->width || - yuv420_image_ptr->height != p010_image_ptr->height) { - return ERROR_JPEGR_RESOLUTION_MISMATCH; - } - if (yuv420_image_ptr->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED || - p010_image_ptr->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED) { - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - - size_t image_width = yuv420_image_ptr->width; - size_t image_height = yuv420_image_ptr->height; - size_t map_width = image_width / kMapDimensionScaleFactor; - size_t map_height = image_height / kMapDimensionScaleFactor; - - dest->data = new uint8_t[map_width * map_height]; - dest->width = map_width; - dest->height = map_height; - dest->colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; - dest->luma_stride = map_width; - dest->chroma_data = nullptr; - dest->chroma_stride = 0; - std::unique_ptr map_data; - map_data.reset(reinterpret_cast(dest->data)); - - ColorTransformFn hdrInvOetf = nullptr; - float hdr_white_nits; - switch (hdr_tf) { - case ULTRAHDR_TF_LINEAR: - hdrInvOetf = identityConversion; - // Note: this will produce clipping if the input exceeds kHlgMaxNits. - // TODO: TF LINEAR will be deprecated. 
- hdr_white_nits = kHlgMaxNits; - break; - case ULTRAHDR_TF_HLG: -#if USE_HLG_INVOETF_LUT - hdrInvOetf = hlgInvOetfLUT; -#else - hdrInvOetf = hlgInvOetf; -#endif - hdr_white_nits = kHlgMaxNits; - break; - case ULTRAHDR_TF_PQ: -#if USE_PQ_INVOETF_LUT - hdrInvOetf = pqInvOetfLUT; -#else - hdrInvOetf = pqInvOetf; -#endif - hdr_white_nits = kPqMaxNits; - break; - default: - // Should be impossible to hit after input validation. - return ERROR_JPEGR_INVALID_TRANS_FUNC; - } - - metadata->maxContentBoost = hdr_white_nits / kSdrWhiteNits; - metadata->minContentBoost = 1.0f; - metadata->gamma = 1.0f; - metadata->offsetSdr = 0.0f; - metadata->offsetHdr = 0.0f; - metadata->hdrCapacityMin = 1.0f; - metadata->hdrCapacityMax = metadata->maxContentBoost; - - float log2MinBoost = log2(metadata->minContentBoost); - float log2MaxBoost = log2(metadata->maxContentBoost); - - ColorTransformFn hdrGamutConversionFn = - getHdrConversionFn(yuv420_image_ptr->colorGamut, p010_image_ptr->colorGamut); - - ColorCalculationFn luminanceFn = nullptr; - ColorTransformFn sdrYuvToRgbFn = nullptr; - switch (yuv420_image_ptr->colorGamut) { - case ULTRAHDR_COLORGAMUT_BT709: - luminanceFn = srgbLuminance; - sdrYuvToRgbFn = srgbYuvToRgb; - break; - case ULTRAHDR_COLORGAMUT_P3: - luminanceFn = p3Luminance; - sdrYuvToRgbFn = p3YuvToRgb; - break; - case ULTRAHDR_COLORGAMUT_BT2100: - luminanceFn = bt2100Luminance; - sdrYuvToRgbFn = bt2100YuvToRgb; - break; - case ULTRAHDR_COLORGAMUT_UNSPECIFIED: - // Should be impossible to hit after input validation. - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - if (sdr_is_601) { - sdrYuvToRgbFn = p3YuvToRgb; - } - - ColorTransformFn hdrYuvToRgbFn = nullptr; - switch (p010_image_ptr->colorGamut) { - case ULTRAHDR_COLORGAMUT_BT709: - hdrYuvToRgbFn = srgbYuvToRgb; - break; - case ULTRAHDR_COLORGAMUT_P3: - hdrYuvToRgbFn = p3YuvToRgb; - break; - case ULTRAHDR_COLORGAMUT_BT2100: - hdrYuvToRgbFn = bt2100YuvToRgb; - break; - case ULTRAHDR_COLORGAMUT_UNSPECIFIED: - // Should be impossible to hit after input validation. - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - - const int threads = (std::min)(GetCPUCoreCount(), 4); - size_t rowStep = threads == 1 ? image_height : kJobSzInRows; - JobQueue jobQueue; - - std::function generateMap = [yuv420_image_ptr, p010_image_ptr, metadata, dest, hdrInvOetf, - hdrGamutConversionFn, luminanceFn, sdrYuvToRgbFn, - hdrYuvToRgbFn, hdr_white_nits, log2MinBoost, log2MaxBoost, - &jobQueue]() -> void { - size_t rowStart, rowEnd; - while (jobQueue.dequeueJob(rowStart, rowEnd)) { - for (size_t y = rowStart; y < rowEnd; ++y) { - for (size_t x = 0; x < dest->width; ++x) { - Color sdr_yuv_gamma = sampleYuv420(yuv420_image_ptr, kMapDimensionScaleFactor, x, y); - Color sdr_rgb_gamma = sdrYuvToRgbFn(sdr_yuv_gamma); - // We are assuming the SDR input is always sRGB transfer. 
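- // Both inputs are linearized below and their luminances compared; encodeGain() then quantizes the per-pixel ratio into one gain map byte, roughly (a sketch, not the exact implementation): gain = log2(hdr_y_nits / sdr_y_nits), clamped to [log2MinBoost, log2MaxBoost] and scaled linearly to [0, 255].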
-#if USE_SRGB_INVOETF_LUT - Color sdr_rgb = srgbInvOetfLUT(sdr_rgb_gamma); -#else - Color sdr_rgb = srgbInvOetf(sdr_rgb_gamma); -#endif - float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits; - - Color hdr_yuv_gamma = sampleP010(p010_image_ptr, kMapDimensionScaleFactor, x, y); - Color hdr_rgb_gamma = hdrYuvToRgbFn(hdr_yuv_gamma); - Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma); - hdr_rgb = hdrGamutConversionFn(hdr_rgb); - float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits; - - size_t pixel_idx = x + y * dest->width; - reinterpret_cast(dest->data)[pixel_idx] = - encodeGain(sdr_y_nits, hdr_y_nits, metadata, log2MinBoost, log2MaxBoost); - } - } - } - }; - - // generate map - std::vector workers; - for (int th = 0; th < threads - 1; th++) { - workers.push_back(std::thread(generateMap)); - } - - rowStep = (threads == 1 ? image_height : kJobSzInRows) / kMapDimensionScaleFactor; - for (size_t rowStart = 0; rowStart < map_height;) { - size_t rowEnd = (std::min)(rowStart + rowStep, map_height); - jobQueue.enqueueJob(rowStart, rowEnd); - rowStart = rowEnd; - } - jobQueue.markQueueForEnd(); - generateMap(); - std::for_each(workers.begin(), workers.end(), [](std::thread& t) { t.join(); }); - - map_data.release(); - return JPEGR_NO_ERROR; -} - -status_t JpegR::applyGainMap(jr_uncompressed_ptr yuv420_image_ptr, - jr_uncompressed_ptr gainmap_image_ptr, ultrahdr_metadata_ptr metadata, - ultrahdr_output_format output_format, float max_display_boost, - jr_uncompressed_ptr dest) { - if (yuv420_image_ptr == nullptr || gainmap_image_ptr == nullptr || metadata == nullptr || - dest == nullptr || yuv420_image_ptr->data == nullptr || - yuv420_image_ptr->chroma_data == nullptr || gainmap_image_ptr->data == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - if (metadata->version.compare(kJpegrVersion)) { - ALOGE("Unsupported metadata version: %s", metadata->version.c_str()); - return ERROR_JPEGR_BAD_METADATA; - } - if (metadata->gamma != 1.0f) { - ALOGE("Unsupported metadata gamma: %f", metadata->gamma); - return ERROR_JPEGR_BAD_METADATA; - } - if (metadata->offsetSdr != 0.0f || metadata->offsetHdr != 0.0f) { - ALOGE("Unsupported metadata offset sdr, hdr: %f, %f", metadata->offsetSdr, metadata->offsetHdr); - return ERROR_JPEGR_BAD_METADATA; - } - if (metadata->hdrCapacityMin != metadata->minContentBoost || - metadata->hdrCapacityMax != metadata->maxContentBoost) { - ALOGE("Unsupported metadata hdr capacity min, max: %f, %f", metadata->hdrCapacityMin, - metadata->hdrCapacityMax); - return ERROR_JPEGR_BAD_METADATA; - } - - // TODO: remove once map scaling factor is computed based on actual map dims - size_t image_width = yuv420_image_ptr->width; - size_t image_height = yuv420_image_ptr->height; - size_t map_width = image_width / kMapDimensionScaleFactor; - size_t map_height = image_height / kMapDimensionScaleFactor; - if (map_width != gainmap_image_ptr->width || map_height != gainmap_image_ptr->height) { - ALOGE( - "gain map dimensions and primary image dimensions are not to scale, computed gain map " - "resolution is %zux%zu, received gain map resolution is %zux%zu", - map_width, map_height, gainmap_image_ptr->width, gainmap_image_ptr->height); - return ERROR_JPEGR_RESOLUTION_MISMATCH; - } - - dest->width = yuv420_image_ptr->width; - dest->height = yuv420_image_ptr->height; - ShepardsIDW idwTable(kMapDimensionScaleFactor); - float display_boost = (std::min)(max_display_boost, metadata->maxContentBoost); - GainLUT gainLUT(metadata, display_boost); - - JobQueue jobQueue; - std::function applyRecMap = 
[yuv420_image_ptr, gainmap_image_ptr, dest, &jobQueue, - &idwTable, output_format, &gainLUT, - display_boost]() -> void { - size_t width = yuv420_image_ptr->width; - - size_t rowStart, rowEnd; - while (jobQueue.dequeueJob(rowStart, rowEnd)) { - for (size_t y = rowStart; y < rowEnd; ++y) { - for (size_t x = 0; x < width; ++x) { - Color yuv_gamma_sdr = getYuv420Pixel(yuv420_image_ptr, x, y); - // Assuming the sdr image is a decoded JPEG, we should always use Rec.601 YUV coefficients - Color rgb_gamma_sdr = p3YuvToRgb(yuv_gamma_sdr); - // We are assuming the SDR base image is always sRGB transfer. -#if USE_SRGB_INVOETF_LUT - Color rgb_sdr = srgbInvOetfLUT(rgb_gamma_sdr); -#else - Color rgb_sdr = srgbInvOetf(rgb_gamma_sdr); -#endif - float gain; - // TODO: determine map scaling factor based on actual map dims - size_t map_scale_factor = kMapDimensionScaleFactor; - // TODO: If map_scale_factor is guaranteed to be an integer, then remove the following. - // Currently map_scale_factor is of type size_t, but it could be changed to a float - // later. - if (map_scale_factor != floorf(map_scale_factor)) { - gain = sampleMap(gainmap_image_ptr, map_scale_factor, x, y); - } else { - gain = sampleMap(gainmap_image_ptr, map_scale_factor, x, y, idwTable); - } - -#if USE_APPLY_GAIN_LUT - Color rgb_hdr = applyGainLUT(rgb_sdr, gain, gainLUT); -#else - Color rgb_hdr = applyGain(rgb_sdr, gain, metadata, display_boost); -#endif - rgb_hdr = rgb_hdr / display_boost; - size_t pixel_idx = x + y * width; - - switch (output_format) { - case ULTRAHDR_OUTPUT_HDR_LINEAR: { - uint64_t rgba_f16 = colorToRgbaF16(rgb_hdr); - reinterpret_cast(dest->data)[pixel_idx] = rgba_f16; - break; - } - case ULTRAHDR_OUTPUT_HDR_HLG: { -#if USE_HLG_OETF_LUT - ColorTransformFn hdrOetf = hlgOetfLUT; -#else - ColorTransformFn hdrOetf = hlgOetf; -#endif - Color rgb_gamma_hdr = hdrOetf(rgb_hdr); - uint32_t rgba_1010102 = colorToRgba1010102(rgb_gamma_hdr); - reinterpret_cast(dest->data)[pixel_idx] = rgba_1010102; - break; - } - case ULTRAHDR_OUTPUT_HDR_PQ: { -#if USE_PQ_OETF_LUT - ColorTransformFn hdrOetf = pqOetfLUT; -#else - ColorTransformFn hdrOetf = pqOetf; -#endif - Color rgb_gamma_hdr = hdrOetf(rgb_hdr); - uint32_t rgba_1010102 = colorToRgba1010102(rgb_gamma_hdr); - reinterpret_cast(dest->data)[pixel_idx] = rgba_1010102; - break; - } - default: { - } - // Should be impossible to hit after input validation. - } - } - } - } - }; - - const int threads = (std::min)(GetCPUCoreCount(), 4); - std::vector workers; - for (int th = 0; th < threads - 1; th++) { - workers.push_back(std::thread(applyRecMap)); - } - const int rowStep = threads == 1 ? 
yuv420_image_ptr->height : kJobSzInRows; - for (size_t rowStart = 0; rowStart < yuv420_image_ptr->height;) { - int rowEnd = (std::min)(rowStart + rowStep, yuv420_image_ptr->height); - jobQueue.enqueueJob(rowStart, rowEnd); - rowStart = rowEnd; - } - jobQueue.markQueueForEnd(); - applyRecMap(); - std::for_each(workers.begin(), workers.end(), [](std::thread& t) { t.join(); }); - return JPEGR_NO_ERROR; -} - -status_t JpegR::extractPrimaryImageAndGainMap(jr_compressed_ptr jpegr_image_ptr, - jr_compressed_ptr primary_jpg_image_ptr, - jr_compressed_ptr gainmap_jpg_image_ptr) { - if (jpegr_image_ptr == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - - MessageHandler msg_handler; - msg_handler.SetMessageWriter(make_unique(AlogMessageWriter())); - std::shared_ptr seg = DataSegment::Create( - DataRange(0, jpegr_image_ptr->length), static_cast(jpegr_image_ptr->data), - DataSegment::BufferDispositionPolicy::kDontDelete); - DataSegmentDataSource data_source(seg); - JpegInfoBuilder jpeg_info_builder; - jpeg_info_builder.SetImageLimit(2); - JpegScanner jpeg_scanner(&msg_handler); - jpeg_scanner.Run(&data_source, &jpeg_info_builder); - data_source.Reset(); - - if (jpeg_scanner.HasError()) { - return JPEGR_UNKNOWN_ERROR; - } - - const auto& jpeg_info = jpeg_info_builder.GetInfo(); - const auto& image_ranges = jpeg_info.GetImageRanges(); - - if (image_ranges.empty()) { - return ERROR_JPEGR_NO_IMAGES_FOUND; - } - - if (primary_jpg_image_ptr != nullptr) { - primary_jpg_image_ptr->data = - static_cast(jpegr_image_ptr->data) + image_ranges[0].GetBegin(); - primary_jpg_image_ptr->length = image_ranges[0].GetLength(); - } - - if (image_ranges.size() == 1) { - return ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND; - } - - if (gainmap_jpg_image_ptr != nullptr) { - gainmap_jpg_image_ptr->data = - static_cast(jpegr_image_ptr->data) + image_ranges[1].GetBegin(); - gainmap_jpg_image_ptr->length = image_ranges[1].GetLength(); - } - - // TODO: choose primary image and gain map image carefully - if (image_ranges.size() > 2) { - ALOGW("Number of jpeg images present %d, primary, gain map images may not be correctly chosen", - (int)image_ranges.size()); - } - - return JPEGR_NO_ERROR; -} - -// JPEG/R structure: -// SOI (ff d8) -// -// (Optional, if EXIF package is from outside (Encode API-0 API-1), or if EXIF package presents -// in the JPEG input (Encode API-2, API-3, API-4)) -// APP1 (ff e1) -// 2 bytes of length (2 + length of exif package) -// EXIF package (this includes the first two bytes representing the package length) -// -// (Required, XMP package) APP1 (ff e1) -// 2 bytes of length (2 + 29 + length of xmp package) -// name space ("http://ns.adobe.com/xap/1.0/\0") -// XMP -// -// (Required, MPF package) APP2 (ff e2) -// 2 bytes of length -// MPF -// -// (Required) primary image (without the first two bytes (SOI) and EXIF, may have other packages) -// -// SOI (ff d8) -// -// (Required, XMP package) APP1 (ff e1) -// 2 bytes of length (2 + 29 + length of xmp package) -// name space ("http://ns.adobe.com/xap/1.0/\0") -// XMP -// -// (Required) secondary image (the gain map, without the first two bytes (SOI)) -// -// Metadata versions we are using: -// ECMA TR-98 for JFIF marker -// Exif 2.2 spec for EXIF marker -// Adobe XMP spec part 3 for XMP marker -// ICC v4.3 spec for ICC -status_t JpegR::appendGainMap(jr_compressed_ptr primary_jpg_image_ptr, - jr_compressed_ptr gainmap_jpg_image_ptr, jr_exif_ptr pExif, - void* pIcc, size_t icc_size, ultrahdr_metadata_ptr metadata, - jr_compressed_ptr dest) { - if (primary_jpg_image_ptr == 
nullptr || gainmap_jpg_image_ptr == nullptr || metadata == nullptr || - dest == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - if (metadata->version.compare("1.0")) { - ALOGE("received bad value for version: %s", metadata->version.c_str()); - return ERROR_JPEGR_BAD_METADATA; - } - if (metadata->maxContentBoost < metadata->minContentBoost) { - ALOGE("received bad value for content boost min %f, max %f", metadata->minContentBoost, - metadata->maxContentBoost); - return ERROR_JPEGR_BAD_METADATA; - } - if (metadata->hdrCapacityMax < metadata->hdrCapacityMin || metadata->hdrCapacityMin < 1.0f) { - ALOGE("received bad value for hdr capacity min %f, max %f", metadata->hdrCapacityMin, - metadata->hdrCapacityMax); - return ERROR_JPEGR_BAD_METADATA; - } - if (metadata->offsetSdr < 0.0f || metadata->offsetHdr < 0.0f) { - ALOGE("received bad value for offset sdr %f, hdr %f", metadata->offsetSdr, metadata->offsetHdr); - return ERROR_JPEGR_BAD_METADATA; - } - if (metadata->gamma <= 0.0f) { - ALOGE("received bad value for gamma %f", metadata->gamma); - return ERROR_JPEGR_BAD_METADATA; - } - - const string nameSpace = "http://ns.adobe.com/xap/1.0/"; - const int nameSpaceLength = nameSpace.size() + 1; // need to count the null terminator - - // calculate secondary image length first, because the length will be written into the primary - // image xmp - const string xmp_secondary = generateXmpForSecondaryImage(*metadata); - // xmp_secondary_length = 2 bytes representing the length of the package + - // + nameSpaceLength = 29 bytes length - // + length of xmp packet = xmp_secondary.size() - const int xmp_secondary_length = 2 + nameSpaceLength + xmp_secondary.size(); - const int secondary_image_size = 2 /* 2 bytes length of APP1 sign */ - + xmp_secondary_length + gainmap_jpg_image_ptr->length; - // primary image - const string xmp_primary = generateXmpForPrimaryImage(secondary_image_size, *metadata); - // same as primary - const int xmp_primary_length = 2 + nameSpaceLength + xmp_primary.size(); - - // Check if EXIF package presents in the JPEG input. - // If so, extract and remove the EXIF package. - JpegDecoderHelper decoder; - if (!decoder.extractEXIF(primary_jpg_image_ptr->data, primary_jpg_image_ptr->length)) { - return ERROR_JPEGR_DECODE_ERROR; - } - jpegr_exif_struct exif_from_jpg; - exif_from_jpg.data = nullptr; - exif_from_jpg.length = 0; - jpegr_compressed_struct new_jpg_image; - new_jpg_image.data = nullptr; - new_jpg_image.length = 0; - new_jpg_image.maxLength = 0; - new_jpg_image.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; - std::unique_ptr dest_data; - if (decoder.getEXIFPos() >= 0) { - if (pExif != nullptr) { - ALOGE("received EXIF from outside while the primary image already contains EXIF"); - return ERROR_JPEGR_MULTIPLE_EXIFS_RECEIVED; - } - copyJpegWithoutExif(&new_jpg_image, primary_jpg_image_ptr, decoder.getEXIFPos(), - decoder.getEXIFSize()); - dest_data.reset(reinterpret_cast(new_jpg_image.data)); - exif_from_jpg.data = decoder.getEXIFPtr(); - exif_from_jpg.length = decoder.getEXIFSize(); - pExif = &exif_from_jpg; - } - - jr_compressed_ptr final_primary_jpg_image_ptr = - new_jpg_image.length == 0 ? 
primary_jpg_image_ptr : &new_jpg_image; - - int pos = 0; - // Begin primary image - // Write SOI - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kSOI, 1, pos)); - - // Write EXIF - if (pExif != nullptr) { - const int length = 2 + pExif->length; - const uint8_t lengthH = ((length >> 8) & 0xff); - const uint8_t lengthL = (length & 0xff); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); - JPEGR_CHECK(Write(dest, pExif->data, pExif->length, pos)); - } - - // Prepare and write XMP - { - const int length = xmp_primary_length; - const uint8_t lengthH = ((length >> 8) & 0xff); - const uint8_t lengthL = (length & 0xff); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); - JPEGR_CHECK(Write(dest, (void*)nameSpace.c_str(), nameSpaceLength, pos)); - JPEGR_CHECK(Write(dest, (void*)xmp_primary.c_str(), xmp_primary.size(), pos)); - } - - // Write ICC - if (pIcc != nullptr && icc_size > 0) { - const int length = icc_size + 2; - const uint8_t lengthH = ((length >> 8) & 0xff); - const uint8_t lengthL = (length & 0xff); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); - JPEGR_CHECK(Write(dest, pIcc, icc_size, pos)); - } - - // Prepare and write MPF - { - const int length = 2 + calculateMpfSize(); - const uint8_t lengthH = ((length >> 8) & 0xff); - const uint8_t lengthL = (length & 0xff); - int primary_image_size = pos + length + final_primary_jpg_image_ptr->length; - // between APP2 + package size + signature - // ff e2 00 58 4d 50 46 00 - // 2 + 2 + 4 = 8 (bytes) - // and ff d8 sign of the secondary image - int secondary_image_offset = primary_image_size - pos - 8; - std::shared_ptr mpf = generateMpf(primary_image_size, 0, /* primary_image_offset */ - secondary_image_size, secondary_image_offset); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); - JPEGR_CHECK(Write(dest, (void*)mpf->getData(), mpf->getLength(), pos)); - } - - // Write primary image - JPEGR_CHECK(Write(dest, (uint8_t*)final_primary_jpg_image_ptr->data + 2, - final_primary_jpg_image_ptr->length - 2, pos)); - // Finish primary image - - // Begin secondary image (gain map) - // Write SOI - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kSOI, 1, pos)); - - // Prepare and write XMP - { - const int length = xmp_secondary_length; - const uint8_t lengthH = ((length >> 8) & 0xff); - const uint8_t lengthL = (length & 0xff); - JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); - JPEGR_CHECK(Write(dest, 
&photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); - JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); - JPEGR_CHECK(Write(dest, (void*)nameSpace.c_str(), nameSpaceLength, pos)); - JPEGR_CHECK(Write(dest, (void*)xmp_secondary.c_str(), xmp_secondary.size(), pos)); - } - - // Write secondary image - JPEGR_CHECK(Write(dest, (uint8_t*)gainmap_jpg_image_ptr->data + 2, - gainmap_jpg_image_ptr->length - 2, pos)); - - // Set back length - dest->length = pos; - - // Done! - return JPEGR_NO_ERROR; -} - -status_t JpegR::toneMap(jr_uncompressed_ptr src, jr_uncompressed_ptr dest) { - if (src == nullptr || dest == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - if (src->width != dest->width || src->height != dest->height) { - return ERROR_JPEGR_RESOLUTION_MISMATCH; - } - uint16_t* src_y_data = reinterpret_cast(src->data); - uint8_t* dst_y_data = reinterpret_cast(dest->data); - for (size_t y = 0; y < src->height; ++y) { - uint16_t* src_y_row = src_y_data + y * src->luma_stride; - uint8_t* dst_y_row = dst_y_data + y * dest->luma_stride; - for (size_t x = 0; x < src->width; ++x) { - uint16_t y_uint = src_y_row[x] >> 6; - dst_y_row[x] = static_cast((y_uint >> 2) & 0xff); - } - if (dest->width != dest->luma_stride) { - memset(dst_y_row + dest->width, 0, dest->luma_stride - dest->width); - } - } - uint16_t* src_uv_data = reinterpret_cast(src->chroma_data); - uint8_t* dst_u_data = reinterpret_cast(dest->chroma_data); - size_t dst_v_offset = (dest->chroma_stride * dest->height / 2); - uint8_t* dst_v_data = dst_u_data + dst_v_offset; - for (size_t y = 0; y < src->height / 2; ++y) { - uint16_t* src_uv_row = src_uv_data + y * src->chroma_stride; - uint8_t* dst_u_row = dst_u_data + y * dest->chroma_stride; - uint8_t* dst_v_row = dst_v_data + y * dest->chroma_stride; - for (size_t x = 0; x < src->width / 2; ++x) { - uint16_t u_uint = src_uv_row[x << 1] >> 6; - uint16_t v_uint = src_uv_row[(x << 1) + 1] >> 6; - dst_u_row[x] = static_cast((u_uint >> 2) & 0xff); - dst_v_row[x] = static_cast((v_uint >> 2) & 0xff); - } - if (dest->width / 2 != dest->chroma_stride) { - memset(dst_u_row + dest->width / 2, 0, dest->chroma_stride - dest->width / 2); - memset(dst_v_row + dest->width / 2, 0, dest->chroma_stride - dest->width / 2); - } - } - dest->colorGamut = src->colorGamut; - return JPEGR_NO_ERROR; -} - -status_t JpegR::convertYuv(jr_uncompressed_ptr image, ultrahdr_color_gamut src_encoding, - ultrahdr_color_gamut dest_encoding) { - if (image == nullptr) { - return ERROR_JPEGR_BAD_PTR; - } - if (src_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED || - dest_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED) { - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - - ColorTransformFn conversionFn = nullptr; - switch (src_encoding) { - case ULTRAHDR_COLORGAMUT_BT709: - switch (dest_encoding) { - case ULTRAHDR_COLORGAMUT_BT709: - return JPEGR_NO_ERROR; - case ULTRAHDR_COLORGAMUT_P3: - conversionFn = yuv709To601; - break; - case ULTRAHDR_COLORGAMUT_BT2100: - conversionFn = yuv709To2100; - break; - default: - // Should be impossible to hit after input validation - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - break; - case ULTRAHDR_COLORGAMUT_P3: - switch (dest_encoding) { - case ULTRAHDR_COLORGAMUT_BT709: - conversionFn = yuv601To709; - break; - case ULTRAHDR_COLORGAMUT_P3: - return JPEGR_NO_ERROR; - case ULTRAHDR_COLORGAMUT_BT2100: - conversionFn = yuv601To2100; - break; - default: - // Should be impossible to hit after input validation - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - break; - case 
ULTRAHDR_COLORGAMUT_BT2100: - switch (dest_encoding) { - case ULTRAHDR_COLORGAMUT_BT709: - conversionFn = yuv2100To709; - break; - case ULTRAHDR_COLORGAMUT_P3: - conversionFn = yuv2100To601; - break; - case ULTRAHDR_COLORGAMUT_BT2100: - return JPEGR_NO_ERROR; - default: - // Should be impossible to hit after input validation - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - break; - default: - // Should be impossible to hit after input validation - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - - if (conversionFn == nullptr) { - // Should be impossible to hit after input validation - return ERROR_JPEGR_INVALID_COLORGAMUT; - } - - for (size_t y = 0; y < image->height / 2; ++y) { - for (size_t x = 0; x < image->width / 2; ++x) { - transformYuv420(image, x, y, conversionFn); - } - } - - return JPEGR_NO_ERROR; -} - -} // namespace ultrahdr diff --git a/lib/jpegr.h b/lib/jpegr.h deleted file mode 100644 index 5c8f9c3..0000000 --- a/lib/jpegr.h +++ /dev/null @@ -1,464 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ULTRAHDR_JPEGR_H -#define ULTRAHDR_JPEGR_H - -#include - -#include "ultrahdr.h" -#include "jpegdecoderhelper.h" -#include "jpegencoderhelper.h" - -namespace ultrahdr { - -// The current JPEGR version that we encode to -static const char* const kJpegrVersion = "1.0"; - -// Map is quarter res / sixteenth size -static const size_t kMapDimensionScaleFactor = 4; - -// Gain Map width is (image_width / kMapDimensionScaleFactor). If we were to -// compress 420 GainMap in jpeg, then we need at least 2 samples. For Grayscale -// 1 sample is sufficient. 
We are using 2 here anyways -static const int kMinWidth = 2 * kMapDimensionScaleFactor; -static const int kMinHeight = 2 * kMapDimensionScaleFactor; - -typedef enum { - JPEGR_NO_ERROR = 0, - JPEGR_UNKNOWN_ERROR = -1, - - JPEGR_IO_ERROR_BASE = -10000, - ERROR_JPEGR_BAD_PTR = JPEGR_IO_ERROR_BASE - 1, - ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT = JPEGR_IO_ERROR_BASE - 2, - ERROR_JPEGR_INVALID_COLORGAMUT = JPEGR_IO_ERROR_BASE - 3, - ERROR_JPEGR_INVALID_STRIDE = JPEGR_IO_ERROR_BASE - 4, - ERROR_JPEGR_INVALID_TRANS_FUNC = JPEGR_IO_ERROR_BASE - 5, - ERROR_JPEGR_RESOLUTION_MISMATCH = JPEGR_IO_ERROR_BASE - 6, - ERROR_JPEGR_INVALID_QUALITY_FACTOR = JPEGR_IO_ERROR_BASE - 7, - ERROR_JPEGR_INVALID_DISPLAY_BOOST = JPEGR_IO_ERROR_BASE - 8, - ERROR_JPEGR_INVALID_OUTPUT_FORMAT = JPEGR_IO_ERROR_BASE - 9, - ERROR_JPEGR_BAD_METADATA = JPEGR_IO_ERROR_BASE - 10, - - JPEGR_RUNTIME_ERROR_BASE = -20000, - ERROR_JPEGR_ENCODE_ERROR = JPEGR_RUNTIME_ERROR_BASE - 1, - ERROR_JPEGR_DECODE_ERROR = JPEGR_RUNTIME_ERROR_BASE - 2, - ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND = JPEGR_RUNTIME_ERROR_BASE - 3, - ERROR_JPEGR_BUFFER_TOO_SMALL = JPEGR_RUNTIME_ERROR_BASE - 4, - ERROR_JPEGR_METADATA_ERROR = JPEGR_RUNTIME_ERROR_BASE - 5, - ERROR_JPEGR_NO_IMAGES_FOUND = JPEGR_RUNTIME_ERROR_BASE - 6, - ERROR_JPEGR_MULTIPLE_EXIFS_RECEIVED = JPEGR_RUNTIME_ERROR_BASE - 7, - - ERROR_JPEGR_UNSUPPORTED_FEATURE = -30000, -} status_t; - -/* - * Holds information of jpegr image - */ -struct jpegr_info_struct { - size_t width; - size_t height; - std::vector* iccData; - std::vector* exifData; -}; - -/* - * Holds information for uncompressed image or gain map. - */ -struct jpegr_uncompressed_struct { - // Pointer to the data location. - void* data; - // Width of the gain map or the luma plane of the image in pixels. - size_t width; - // Height of the gain map or the luma plane of the image in pixels. - size_t height; - // Color gamut. - ultrahdr_color_gamut colorGamut; - - // Values below are optional - // Pointer to chroma data, if it's NULL, chroma plane is considered to be immediately - // after the luma plane. - void* chroma_data = nullptr; - // Stride of Y plane in number of pixels. 0 indicates the member is uninitialized. If - // non-zero this value must be larger than or equal to luma width. If stride is - // uninitialized then it is assumed to be equal to luma width. - size_t luma_stride = 0; - // Stride of UV plane in number of pixels. - // 1. If this handle points to P010 image then this value must be larger than - // or equal to luma width. - // 2. If this handle points to 420 image then this value must be larger than - // or equal to (luma width / 2). - // NOTE: if chroma_data is nullptr, chroma_stride is irrelevant. Just as the way, - // chroma_data is derived from luma ptr, chroma stride is derived from luma stride. - size_t chroma_stride = 0; -}; - -/* - * Holds information for compressed image or gain map. - */ -struct jpegr_compressed_struct { - // Pointer to the data location. - void* data; - // Used data length in bytes. - int length; - // Maximum available data length in bytes. - int maxLength; - // Color gamut. - ultrahdr_color_gamut colorGamut; -}; - -/* - * Holds information for EXIF metadata. - */ -struct jpegr_exif_struct { - // Pointer to the data location. 
- void* data; - // Data length; - size_t length; -}; - -typedef struct jpegr_uncompressed_struct* jr_uncompressed_ptr; -typedef struct jpegr_compressed_struct* jr_compressed_ptr; -typedef struct jpegr_exif_struct* jr_exif_ptr; -typedef struct jpegr_info_struct* jr_info_ptr; - -class JpegR { - public: - /* - * Experimental only - * - * Encode API-0 - * Compress JPEGR image from 10-bit HDR YUV. - * - * Tonemap the HDR input to a SDR image, generate gain map from the HDR and SDR images, - * compress SDR YUV to 8-bit JPEG and append the gain map to the end of the compressed - * JPEG. - * @param p010_image_ptr uncompressed HDR image in P010 color format - * @param hdr_tf transfer function of the HDR image - * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} - * represents the maximum available size of the destination buffer, and it must be - * set before calling this method. If the encoded JPEGR size exceeds - * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. - * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is - * the highest quality - * @param exif pointer to the exif metadata. - * @return NO_ERROR if encoding succeeds, error code if error occurs. - */ - status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, ultrahdr_transfer_function hdr_tf, - jr_compressed_ptr dest, int quality, jr_exif_ptr exif); - - /* - * Encode API-1 - * Compress JPEGR image from 10-bit HDR YUV and 8-bit SDR YUV. - * - * Generate gain map from the HDR and SDR inputs, compress SDR YUV to 8-bit JPEG and append - * the gain map to the end of the compressed JPEG. HDR and SDR inputs must be the same - * resolution. SDR input is assumed to use the sRGB transfer function. - * @param p010_image_ptr uncompressed HDR image in P010 color format - * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format - * @param hdr_tf transfer function of the HDR image - * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} - * represents the maximum available size of the desitination buffer, and it must be - * set before calling this method. If the encoded JPEGR size exceeds - * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. - * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is - * the highest quality - * @param exif pointer to the exif metadata. - * @return NO_ERROR if encoding succeeds, error code if error occurs. - */ - status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_uncompressed_ptr yuv420_image_ptr, - ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest, int quality, - jr_exif_ptr exif); - - /* - * Encode API-2 - * Compress JPEGR image from 10-bit HDR YUV, 8-bit SDR YUV and compressed 8-bit JPEG. - * - * This method requires HAL Hardware JPEG encoder. - * - * Generate gain map from the HDR and SDR inputs, append the gain map to the end of the - * compressed JPEG. Adds an ICC profile if one isn't present in the input JPEG image. HDR and - * SDR inputs must be the same resolution and color space. SDR image is assumed to use the sRGB - * transfer function. - * @param p010_image_ptr uncompressed HDR image in P010 color format - * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format - * @param yuv420jpg_image_ptr SDR image compressed in jpeg format - * @param hdr_tf transfer function of the HDR image - * @param dest destination of the compressed JPEGR image. 
Please note that {@code maxLength} - * represents the maximum available size of the desitination buffer, and it must be - * set before calling this method. If the encoded JPEGR size exceeds - * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. - * @return NO_ERROR if encoding succeeds, error code if error occurs. - */ - status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_uncompressed_ptr yuv420_image_ptr, - jr_compressed_ptr yuv420jpg_image_ptr, ultrahdr_transfer_function hdr_tf, - jr_compressed_ptr dest); - - /* - * Encode API-3 - * Compress JPEGR image from 10-bit HDR YUV and 8-bit SDR YUV. - * - * This method requires HAL Hardware JPEG encoder. - * - * Decode the compressed 8-bit JPEG image to YUV SDR, generate gain map from the HDR input - * and the decoded SDR result, append the gain map to the end of the compressed JPEG. Adds an - * ICC profile if one isn't present in the input JPEG image. HDR and SDR inputs must be the same - * resolution. JPEG image is assumed to use the sRGB transfer function. - * @param p010_image_ptr uncompressed HDR image in P010 color format - * @param yuv420jpg_image_ptr SDR image compressed in jpeg format - * @param hdr_tf transfer function of the HDR image - * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} - * represents the maximum available size of the desitination buffer, and it must be - * set before calling this method. If the encoded JPEGR size exceeds - * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. - * @return NO_ERROR if encoding succeeds, error code if error occurs. - */ - status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_compressed_ptr yuv420jpg_image_ptr, - ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest); - - /* - * Encode API-4 - * Assemble JPEGR image from SDR JPEG and gainmap JPEG. - * - * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format. Adds an ICC - * profile if one isn't present in the input JPEG image. - * @param yuv420jpg_image_ptr SDR image compressed in jpeg format - * @param gainmapjpg_image_ptr gain map image compressed in jpeg format - * @param metadata metadata to be written in XMP of the primary jpeg - * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} - * represents the maximum available size of the desitination buffer, and it must be - * set before calling this method. If the encoded JPEGR size exceeds - * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. - * @return NO_ERROR if encoding succeeds, error code if error occurs. - */ - status_t encodeJPEGR(jr_compressed_ptr yuv420jpg_image_ptr, - jr_compressed_ptr gainmapjpg_image_ptr, ultrahdr_metadata_ptr metadata, - jr_compressed_ptr dest); - - /* - * Decode API - * Decompress JPEGR image. - * - * This method assumes that the JPEGR image contains an ICC profile with primaries that match - * those of a color gamut that this library is aware of; Bt.709, Display-P3, or Bt.2100. It also - * assumes the base image uses the sRGB transfer function. - * - * This method only supports single gain map metadata values for fields that allow multi-channel - * metadata values. - * @param jpegr_image_ptr compressed JPEGR image. - * @param dest destination of the uncompressed JPEGR image. - * @param max_display_boost (optional) the maximum available boost supported by a display, - * the value must be greater than or equal to 1.0. 
- * @param exif destination of the decoded EXIF metadata. The default value is NULL where the - decoder will do nothing about it. If configured not NULL the decoder will write - EXIF data into this structure. The format is defined in {@code jpegr_exif_struct} - * @param output_format flag for setting output color format. Its value configures the output - color format. The default value is {@code JPEGR_OUTPUT_HDR_LINEAR}. - ---------------------------------------------------------------------- - | output_format | decoded color format to be written | - ---------------------------------------------------------------------- - | JPEGR_OUTPUT_SDR | RGBA_8888 | - ---------------------------------------------------------------------- - | JPEGR_OUTPUT_HDR_LINEAR | (default)RGBA_F16 linear | - ---------------------------------------------------------------------- - | JPEGR_OUTPUT_HDR_PQ | RGBA_1010102 PQ | - ---------------------------------------------------------------------- - | JPEGR_OUTPUT_HDR_HLG | RGBA_1010102 HLG | - ---------------------------------------------------------------------- - * @param gainmap_image_ptr destination of the decoded gain map. The default value is NULL - where the decoder will do nothing about it. If configured not NULL - the decoder will write the decoded gain_map data into this - structure. The format is defined in - {@code jpegr_uncompressed_struct}. - * @param metadata destination of the decoded metadata. The default value is NULL where the - decoder will do nothing about it. If configured not NULL the decoder will - write metadata into this structure. the format of metadata is defined in - {@code ultrahdr_metadata_struct}. - * @return NO_ERROR if decoding succeeds, error code if error occurs. - */ - status_t decodeJPEGR(jr_compressed_ptr jpegr_image_ptr, jr_uncompressed_ptr dest, - float max_display_boost = FLT_MAX, jr_exif_ptr exif = nullptr, - ultrahdr_output_format output_format = ULTRAHDR_OUTPUT_HDR_LINEAR, - jr_uncompressed_ptr gainmap_image_ptr = nullptr, - ultrahdr_metadata_ptr metadata = nullptr); - - /* - * Gets Info from JPEGR file without decoding it. - * - * This method only supports single gain map metadata values for fields that allow multi-channel - * metadata values. - * - * The output is filled jpegr_info structure - * @param jpegr_image_ptr compressed JPEGR image - * @param jpeg_image_info_ptr pointer to jpegr info struct. Members of jpegr_info - * are owned by the caller - * @return NO_ERROR if JPEGR parsing succeeds, error code otherwise - */ - status_t getJPEGRInfo(jr_compressed_ptr jpegr_image_ptr, jr_info_ptr jpeg_image_info_ptr); - - protected: - /* - * This method is called in the encoding pipeline. It will take the uncompressed 8-bit and - * 10-bit yuv images as input, and calculate the uncompressed gain map. The input images - * must be the same resolution. The SDR input is assumed to use the sRGB transfer function. - * - * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format - * @param p010_image_ptr uncompressed HDR image in P010 color format - * @param hdr_tf transfer function of the HDR image - * @param metadata everything but "version" is filled in this struct - * @param dest location at which gain map image is stored (caller responsible for memory - of data). - * @param sdr_is_601 if true, then use BT.601 decoding of YUV regardless of SDR image gamut - * @return NO_ERROR if calculation succeeds, error code if error occurs. 
- */ - status_t generateGainMap(jr_uncompressed_ptr yuv420_image_ptr, jr_uncompressed_ptr p010_image_ptr, - ultrahdr_transfer_function hdr_tf, ultrahdr_metadata_ptr metadata, - jr_uncompressed_ptr dest, bool sdr_is_601 = false); - - /* - * This method is called in the decoding pipeline. It will take the uncompressed (decoded) - * 8-bit yuv image, the uncompressed (decoded) gain map, and extracted JPEG/R metadata as - * input, and calculate the 10-bit recovered image. The recovered output image is the same - * color gamut as the SDR image, with HLG transfer function, and is in RGBA1010102 data format. - * The SDR image is assumed to use the sRGB transfer function. The SDR image is also assumed to - * be a decoded JPEG for the purpose of YUV interpration. - * - * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format - * @param gainmap_image_ptr pointer to uncompressed gain map image struct. - * @param metadata JPEG/R metadata extracted from XMP. - * @param output_format flag for setting output color format. if set to - * {@code JPEGR_OUTPUT_SDR}, decoder will only decode the primary image - * which is SDR. Default value is JPEGR_OUTPUT_HDR_LINEAR. - * @param max_display_boost the maximum available boost supported by a display - * @param dest reconstructed HDR image - * @return NO_ERROR if calculation succeeds, error code if error occurs. - */ - status_t applyGainMap(jr_uncompressed_ptr yuv420_image_ptr, jr_uncompressed_ptr gainmap_image_ptr, - ultrahdr_metadata_ptr metadata, ultrahdr_output_format output_format, - float max_display_boost, jr_uncompressed_ptr dest); - - private: - /* - * This method is called in the encoding pipeline. It will encode the gain map. - * - * @param gainmap_image_ptr pointer to uncompressed gain map image struct - * @param jpeg_enc_obj_ptr helper resource to compress gain map - * @return NO_ERROR if encoding succeeds, error code if error occurs. - */ - status_t compressGainMap(jr_uncompressed_ptr gainmap_image_ptr, - JpegEncoderHelper* jpeg_enc_obj_ptr); - - /* - * This method is called to separate primary image and gain map image from JPEGR - * - * @param jpegr_image_ptr pointer to compressed JPEGR image. - * @param primary_jpg_image_ptr destination of primary image - * @param gainmap_jpg_image_ptr destination of compressed gain map image - * @return NO_ERROR if calculation succeeds, error code if error occurs. - */ - status_t extractPrimaryImageAndGainMap(jr_compressed_ptr jpegr_image_ptr, - jr_compressed_ptr primary_jpg_image_ptr, - jr_compressed_ptr gainmap_jpg_image_ptr); - - /* - * This method is called in the encoding pipeline. It will take the standard 8-bit JPEG image, - * the compressed gain map and optionally the exif package as inputs, and generate the XMP - * metadata, and finally append everything in the order of: - * SOI, APP2(EXIF) (if EXIF is from outside), APP2(XMP), primary image, gain map - * - * Note that in the final JPEG/R output, EXIF package will appear if ONLY ONE of the following - * conditions is fulfilled: - * (1) EXIF package is available from outside input. I.e. pExif != nullptr. - * (2) Input JPEG has EXIF. 
- * If both conditions are fulfilled, this method will return ERROR_JPEGR_INVALID_INPUT_TYPE - * - * @param primary_jpg_image_ptr destination of primary image - * @param gainmap_jpg_image_ptr destination of compressed gain map image - * @param (nullable) pExif EXIF package - * @param (nullable) pIcc ICC package - * @param icc_size length in bytes of ICC package - * @param metadata JPEG/R metadata to encode in XMP of the jpeg - * @param dest compressed JPEGR image - * @return NO_ERROR if calculation succeeds, error code if error occurs. - */ - status_t appendGainMap(jr_compressed_ptr primary_jpg_image_ptr, - jr_compressed_ptr gainmap_jpg_image_ptr, jr_exif_ptr pExif, void* pIcc, - size_t icc_size, ultrahdr_metadata_ptr metadata, jr_compressed_ptr dest); - - /* - * This method will tone map a HDR image to an SDR image. - * - * @param src pointer to uncompressed HDR image struct. HDR image is expected to be - * in p010 color format - * @param dest pointer to store tonemapped SDR image - */ - status_t toneMap(jr_uncompressed_ptr src, jr_uncompressed_ptr dest); - - /* - * This method will convert a YUV420 image from one YUV encoding to another in-place (eg. - * Bt.709 to Bt.601 YUV encoding). - * - * src_encoding and dest_encoding indicate the encoding via the YUV conversion defined for that - * gamut. P3 indicates Rec.601, since this is how DataSpace encodes Display-P3 YUV data. - * - * @param image the YUV420 image to convert - * @param src_encoding input YUV encoding - * @param dest_encoding output YUV encoding - * @return NO_ERROR if calculation succeeds, error code if error occurs. - */ - status_t convertYuv(jr_uncompressed_ptr image, ultrahdr_color_gamut src_encoding, - ultrahdr_color_gamut dest_encoding); - - /* - * This method will check the validity of the input arguments. - * - * @param p010_image_ptr uncompressed HDR image in P010 color format - * @param yuv420_image_ptr pointer to uncompressed SDR image struct. HDR image is expected to - * be in 420p color format - * @param hdr_tf transfer function of the HDR image - * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} - * represents the maximum available size of the desitination buffer, and it must be - * set before calling this method. If the encoded JPEGR size exceeds - * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. - * @return NO_ERROR if the input args are valid, error code is not valid. - */ - status_t areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, - jr_uncompressed_ptr yuv420_image_ptr, - ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest_ptr); - - /* - * This method will check the validity of the input arguments. - * - * @param p010_image_ptr uncompressed HDR image in P010 color format - * @param yuv420_image_ptr pointer to uncompressed SDR image struct. HDR image is expected to - * be in 420p color format - * @param hdr_tf transfer function of the HDR image - * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength} - * represents the maximum available size of the destination buffer, and it must be - * set before calling this method. If the encoded JPEGR size exceeds - * {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}. - * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is - * the highest quality - * @return NO_ERROR if the input args are valid, error code is not valid. 
- */ - status_t areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, - jr_uncompressed_ptr yuv420_image_ptr, - ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest, - int quality); -}; -} // namespace ultrahdr - -#endif // ULTRAHDR_JPEGR_H diff --git a/lib/jpegrutils.cpp b/lib/jpegrutils.cpp deleted file mode 100644 index 2fdc347..0000000 --- a/lib/jpegrutils.cpp +++ /dev/null @@ -1,583 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include "ultrahdrcommon.h" -#include "jpegr.h" -#include "jpegrutils.h" - -#include "image_io/xml/xml_reader.h" -#include "image_io/xml/xml_writer.h" -#include "image_io/base/message_handler.h" -#include "image_io/xml/xml_element_rules.h" -#include "image_io/xml/xml_handler.h" -#include "image_io/xml/xml_rule.h" - -using namespace photos_editing_formats::image_io; -using namespace std; - -namespace ultrahdr { -/* - * Helper function used for generating XMP metadata. - * - * @param prefix The prefix part of the name. - * @param suffix The suffix part of the name. - * @return A name of the form "prefix:suffix". - */ -static inline string Name(const string& prefix, const string& suffix) { - std::stringstream ss; - ss << prefix << ":" << suffix; - return ss.str(); -} - -DataStruct::DataStruct(int s) { - data = malloc(s); - length = s; - memset(data, 0, s); - writePos = 0; -} - -DataStruct::~DataStruct() { - if (data != nullptr) { - free(data); - } -} - -void* DataStruct::getData() { return data; } - -int DataStruct::getLength() { return length; } - -int DataStruct::getBytesWritten() { return writePos; } - -bool DataStruct::write8(uint8_t value) { - uint8_t v = value; - return write(&v, 1); -} - -bool DataStruct::write16(uint16_t value) { - uint16_t v = value; - return write(&v, 2); -} -bool DataStruct::write32(uint32_t value) { - uint32_t v = value; - return write(&v, 4); -} - -bool DataStruct::write(const void* src, int size) { - if (writePos + size > length) { - ALOGE("Writing out of boundary: write position: %d, size: %d, capacity: %d", writePos, size, - length); - return false; - } - memcpy((uint8_t*)data + writePos, src, size); - writePos += size; - return true; -} - -/* - * Helper function used for writing data to destination. 
- */ -status_t Write(jr_compressed_ptr destination, const void* source, size_t length, int& position) { - if (position + length > destination->maxLength) { - return ERROR_JPEGR_BUFFER_TOO_SMALL; - } - - memcpy((uint8_t*)destination->data + sizeof(uint8_t) * position, source, length); - position += length; - return JPEGR_NO_ERROR; -} - -// Extremely simple XML Handler - just searches for interesting elements -class XMPXmlHandler : public XmlHandler { - public: - XMPXmlHandler() : XmlHandler() { - state = NotStrarted; - versionFound = false; - minContentBoostFound = false; - maxContentBoostFound = false; - gammaFound = false; - offsetSdrFound = false; - offsetHdrFound = false; - hdrCapacityMinFound = false; - hdrCapacityMaxFound = false; - baseRenditionIsHdrFound = false; - } - - enum ParseState { NotStrarted, Started, Done }; - - virtual DataMatchResult StartElement(const XmlTokenContext& context) { - string val; - if (context.BuildTokenValue(&val)) { - if (!val.compare(containerName)) { - state = Started; - } else { - if (state != Done) { - state = NotStrarted; - } - } - } - return context.GetResult(); - } - - virtual DataMatchResult FinishElement(const XmlTokenContext& context) { - if (state == Started) { - state = Done; - lastAttributeName = ""; - } - return context.GetResult(); - } - - virtual DataMatchResult AttributeName(const XmlTokenContext& context) { - string val; - if (state == Started) { - if (context.BuildTokenValue(&val)) { - if (!val.compare(versionAttrName)) { - lastAttributeName = versionAttrName; - } else if (!val.compare(maxContentBoostAttrName)) { - lastAttributeName = maxContentBoostAttrName; - } else if (!val.compare(minContentBoostAttrName)) { - lastAttributeName = minContentBoostAttrName; - } else if (!val.compare(gammaAttrName)) { - lastAttributeName = gammaAttrName; - } else if (!val.compare(offsetSdrAttrName)) { - lastAttributeName = offsetSdrAttrName; - } else if (!val.compare(offsetHdrAttrName)) { - lastAttributeName = offsetHdrAttrName; - } else if (!val.compare(hdrCapacityMinAttrName)) { - lastAttributeName = hdrCapacityMinAttrName; - } else if (!val.compare(hdrCapacityMaxAttrName)) { - lastAttributeName = hdrCapacityMaxAttrName; - } else if (!val.compare(baseRenditionIsHdrAttrName)) { - lastAttributeName = baseRenditionIsHdrAttrName; - } else { - lastAttributeName = ""; - } - } - } - return context.GetResult(); - } - - virtual DataMatchResult AttributeValue(const XmlTokenContext& context) { - string val; - if (state == Started) { - if (context.BuildTokenValue(&val, true)) { - if (!lastAttributeName.compare(versionAttrName)) { - versionStr = val; - versionFound = true; - } else if (!lastAttributeName.compare(maxContentBoostAttrName)) { - maxContentBoostStr = val; - maxContentBoostFound = true; - } else if (!lastAttributeName.compare(minContentBoostAttrName)) { - minContentBoostStr = val; - minContentBoostFound = true; - } else if (!lastAttributeName.compare(gammaAttrName)) { - gammaStr = val; - gammaFound = true; - } else if (!lastAttributeName.compare(offsetSdrAttrName)) { - offsetSdrStr = val; - offsetSdrFound = true; - } else if (!lastAttributeName.compare(offsetHdrAttrName)) { - offsetHdrStr = val; - offsetHdrFound = true; - } else if (!lastAttributeName.compare(hdrCapacityMinAttrName)) { - hdrCapacityMinStr = val; - hdrCapacityMinFound = true; - } else if (!lastAttributeName.compare(hdrCapacityMaxAttrName)) { - hdrCapacityMaxStr = val; - hdrCapacityMaxFound = true; - } else if (!lastAttributeName.compare(baseRenditionIsHdrAttrName)) { - 
baseRenditionIsHdrStr = val; - baseRenditionIsHdrFound = true; - } - } - } - return context.GetResult(); - } - - bool getVersion(string* version, bool* present) { - if (state == Done) { - *version = versionStr; - *present = versionFound; - return true; - } else { - return false; - } - } - - bool getMaxContentBoost(float* max_content_boost, bool* present) { - if (state == Done) { - *present = maxContentBoostFound; - stringstream ss(maxContentBoostStr); - float val; - if (ss >> val) { - *max_content_boost = exp2(val); - return true; - } else { - return false; - } - } else { - return false; - } - } - - bool getMinContentBoost(float* min_content_boost, bool* present) { - if (state == Done) { - *present = minContentBoostFound; - stringstream ss(minContentBoostStr); - float val; - if (ss >> val) { - *min_content_boost = exp2(val); - return true; - } else { - return false; - } - } else { - return false; - } - } - - bool getGamma(float* gamma, bool* present) { - if (state == Done) { - *present = gammaFound; - stringstream ss(gammaStr); - float val; - if (ss >> val) { - *gamma = val; - return true; - } else { - return false; - } - } else { - return false; - } - } - - bool getOffsetSdr(float* offset_sdr, bool* present) { - if (state == Done) { - *present = offsetSdrFound; - stringstream ss(offsetSdrStr); - float val; - if (ss >> val) { - *offset_sdr = val; - return true; - } else { - return false; - } - } else { - return false; - } - } - - bool getOffsetHdr(float* offset_hdr, bool* present) { - if (state == Done) { - *present = offsetHdrFound; - stringstream ss(offsetHdrStr); - float val; - if (ss >> val) { - *offset_hdr = val; - return true; - } else { - return false; - } - } else { - return false; - } - } - - bool getHdrCapacityMin(float* hdr_capacity_min, bool* present) { - if (state == Done) { - *present = hdrCapacityMinFound; - stringstream ss(hdrCapacityMinStr); - float val; - if (ss >> val) { - *hdr_capacity_min = exp2(val); - return true; - } else { - return false; - } - } else { - return false; - } - } - - bool getHdrCapacityMax(float* hdr_capacity_max, bool* present) { - if (state == Done) { - *present = hdrCapacityMaxFound; - stringstream ss(hdrCapacityMaxStr); - float val; - if (ss >> val) { - *hdr_capacity_max = exp2(val); - return true; - } else { - return false; - } - } else { - return false; - } - } - - bool getBaseRenditionIsHdr(bool* base_rendition_is_hdr, bool* present) { - if (state == Done) { - *present = baseRenditionIsHdrFound; - if (!baseRenditionIsHdrStr.compare("False")) { - *base_rendition_is_hdr = false; - return true; - } else if (!baseRenditionIsHdrStr.compare("True")) { - *base_rendition_is_hdr = true; - return true; - } else { - return false; - } - } else { - return false; - } - } - - private: - static const string containerName; - - static const string versionAttrName; - string versionStr; - bool versionFound; - static const string maxContentBoostAttrName; - string maxContentBoostStr; - bool maxContentBoostFound; - static const string minContentBoostAttrName; - string minContentBoostStr; - bool minContentBoostFound; - static const string gammaAttrName; - string gammaStr; - bool gammaFound; - static const string offsetSdrAttrName; - string offsetSdrStr; - bool offsetSdrFound; - static const string offsetHdrAttrName; - string offsetHdrStr; - bool offsetHdrFound; - static const string hdrCapacityMinAttrName; - string hdrCapacityMinStr; - bool hdrCapacityMinFound; - static const string hdrCapacityMaxAttrName; - string hdrCapacityMaxStr; - bool hdrCapacityMaxFound; - 
static const string baseRenditionIsHdrAttrName; - string baseRenditionIsHdrStr; - bool baseRenditionIsHdrFound; - - string lastAttributeName; - ParseState state; -}; - -// GContainer XMP constants - URI and namespace prefix -const string kContainerUri = "http://ns.google.com/photos/1.0/container/"; -const string kContainerPrefix = "Container"; - -// GContainer XMP constants - element and attribute names -const string kConDirectory = Name(kContainerPrefix, "Directory"); -const string kConItem = Name(kContainerPrefix, "Item"); - -// GContainer XMP constants - names for XMP handlers -const string XMPXmlHandler::containerName = "rdf:Description"; -// Item XMP constants - URI and namespace prefix -const string kItemUri = "http://ns.google.com/photos/1.0/container/item/"; -const string kItemPrefix = "Item"; - -// Item XMP constants - element and attribute names -const string kItemLength = Name(kItemPrefix, "Length"); -const string kItemMime = Name(kItemPrefix, "Mime"); -const string kItemSemantic = Name(kItemPrefix, "Semantic"); - -// Item XMP constants - element and attribute values -const string kSemanticPrimary = "Primary"; -const string kSemanticGainMap = "GainMap"; -const string kMimeImageJpeg = "image/jpeg"; - -// GainMap XMP constants - URI and namespace prefix -const string kGainMapUri = "http://ns.adobe.com/hdr-gain-map/1.0/"; -const string kGainMapPrefix = "hdrgm"; - -// GainMap XMP constants - element and attribute names -const string kMapVersion = Name(kGainMapPrefix, "Version"); -const string kMapGainMapMin = Name(kGainMapPrefix, "GainMapMin"); -const string kMapGainMapMax = Name(kGainMapPrefix, "GainMapMax"); -const string kMapGamma = Name(kGainMapPrefix, "Gamma"); -const string kMapOffsetSdr = Name(kGainMapPrefix, "OffsetSDR"); -const string kMapOffsetHdr = Name(kGainMapPrefix, "OffsetHDR"); -const string kMapHDRCapacityMin = Name(kGainMapPrefix, "HDRCapacityMin"); -const string kMapHDRCapacityMax = Name(kGainMapPrefix, "HDRCapacityMax"); -const string kMapBaseRenditionIsHDR = Name(kGainMapPrefix, "BaseRenditionIsHDR"); - -// GainMap XMP constants - names for XMP handlers -const string XMPXmlHandler::versionAttrName = kMapVersion; -const string XMPXmlHandler::minContentBoostAttrName = kMapGainMapMin; -const string XMPXmlHandler::maxContentBoostAttrName = kMapGainMapMax; -const string XMPXmlHandler::gammaAttrName = kMapGamma; -const string XMPXmlHandler::offsetSdrAttrName = kMapOffsetSdr; -const string XMPXmlHandler::offsetHdrAttrName = kMapOffsetHdr; -const string XMPXmlHandler::hdrCapacityMinAttrName = kMapHDRCapacityMin; -const string XMPXmlHandler::hdrCapacityMaxAttrName = kMapHDRCapacityMax; -const string XMPXmlHandler::baseRenditionIsHdrAttrName = kMapBaseRenditionIsHDR; - -bool getMetadataFromXMP(uint8_t* xmp_data, size_t xmp_size, ultrahdr_metadata_struct* metadata) { - string nameSpace = "http://ns.adobe.com/xap/1.0/\0"; - - if (xmp_size < nameSpace.size() + 2) { - // Data too short - return false; - } - - if (strncmp(reinterpret_cast(xmp_data), nameSpace.c_str(), nameSpace.size())) { - // Not correct namespace - return false; - } - - // Position the pointers to the start of XMP XML portion - xmp_data += nameSpace.size() + 1; - xmp_size -= nameSpace.size() + 1; - XMPXmlHandler handler; - - // We need to remove tail data until the closing tag. Otherwise parser will throw an error. 
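// In practice this trims any trailing padding or null bytes after the final '>', so the
// string handed to the XML reader ends exactly at the closing tag of the XMP packet.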
- while (xmp_data[xmp_size - 1] != '>' && xmp_size > 1) { - xmp_size--; - } - - string str(reinterpret_cast(xmp_data), xmp_size); - MessageHandler msg_handler; - unique_ptr rule(new XmlElementRule); - XmlReader reader(&handler, &msg_handler); - reader.StartParse(std::move(rule)); - reader.Parse(str); - reader.FinishParse(); - if (reader.HasErrors()) { - // Parse error - return false; - } - - // Apply default values to any not-present fields, except for Version, - // maxContentBoost, and hdrCapacityMax, which are required. Return false if - // we encounter a present field that couldn't be parsed, since this - // indicates it is invalid (eg. string where there should be a float). - bool present = false; - if (!handler.getVersion(&metadata->version, &present) || !present) { - return false; - } - if (!handler.getMaxContentBoost(&metadata->maxContentBoost, &present) || !present) { - return false; - } - if (!handler.getHdrCapacityMax(&metadata->hdrCapacityMax, &present) || !present) { - return false; - } - if (!handler.getMinContentBoost(&metadata->minContentBoost, &present)) { - if (present) return false; - metadata->minContentBoost = 1.0f; - } - if (!handler.getGamma(&metadata->gamma, &present)) { - if (present) return false; - metadata->gamma = 1.0f; - } - if (!handler.getOffsetSdr(&metadata->offsetSdr, &present)) { - if (present) return false; - metadata->offsetSdr = 1.0f / 64.0f; - } - if (!handler.getOffsetHdr(&metadata->offsetHdr, &present)) { - if (present) return false; - metadata->offsetHdr = 1.0f / 64.0f; - } - if (!handler.getHdrCapacityMin(&metadata->hdrCapacityMin, &present)) { - if (present) return false; - metadata->hdrCapacityMin = 1.0f; - } - - bool base_rendition_is_hdr; - if (!handler.getBaseRenditionIsHdr(&base_rendition_is_hdr, &present)) { - if (present) return false; - base_rendition_is_hdr = false; - } - if (base_rendition_is_hdr) { - ALOGE("Base rendition of HDR is not supported!"); - return false; - } - - return true; -} - -string generateXmpForPrimaryImage(int secondary_image_length, ultrahdr_metadata_struct& metadata) { - const vector kConDirSeq({kConDirectory, string("rdf:Seq")}); - const vector kLiItem({string("rdf:li"), kConItem}); - - std::stringstream ss; - photos_editing_formats::image_io::XmlWriter writer(ss); - writer.StartWritingElement("x:xmpmeta"); - writer.WriteXmlns("x", "adobe:ns:meta/"); - writer.WriteAttributeNameAndValue("x:xmptk", "Adobe XMP Core 5.1.2"); - writer.StartWritingElement("rdf:RDF"); - writer.WriteXmlns("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"); - writer.StartWritingElement("rdf:Description"); - writer.WriteXmlns(kContainerPrefix, kContainerUri); - writer.WriteXmlns(kItemPrefix, kItemUri); - writer.WriteXmlns(kGainMapPrefix, kGainMapUri); - writer.WriteAttributeNameAndValue(kMapVersion, metadata.version); - - writer.StartWritingElements(kConDirSeq); - - size_t item_depth = writer.StartWritingElement("rdf:li"); - writer.WriteAttributeNameAndValue("rdf:parseType", "Resource"); - writer.StartWritingElement(kConItem); - writer.WriteAttributeNameAndValue(kItemSemantic, kSemanticPrimary); - writer.WriteAttributeNameAndValue(kItemMime, kMimeImageJpeg); - writer.FinishWritingElementsToDepth(item_depth); - - writer.StartWritingElement("rdf:li"); - writer.WriteAttributeNameAndValue("rdf:parseType", "Resource"); - writer.StartWritingElement(kConItem); - writer.WriteAttributeNameAndValue(kItemSemantic, kSemanticGainMap); - writer.WriteAttributeNameAndValue(kItemMime, kMimeImageJpeg); - writer.WriteAttributeNameAndValue(kItemLength, 
secondary_image_length); - - writer.FinishWriting(); - - return ss.str(); -} - -string generateXmpForSecondaryImage(ultrahdr_metadata_struct& metadata) { - const vector kConDirSeq({kConDirectory, string("rdf:Seq")}); - - std::stringstream ss; - photos_editing_formats::image_io::XmlWriter writer(ss); - writer.StartWritingElement("x:xmpmeta"); - writer.WriteXmlns("x", "adobe:ns:meta/"); - writer.WriteAttributeNameAndValue("x:xmptk", "Adobe XMP Core 5.1.2"); - writer.StartWritingElement("rdf:RDF"); - writer.WriteXmlns("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"); - writer.StartWritingElement("rdf:Description"); - writer.WriteXmlns(kGainMapPrefix, kGainMapUri); - writer.WriteAttributeNameAndValue(kMapVersion, metadata.version); - writer.WriteAttributeNameAndValue(kMapGainMapMin, log2(metadata.minContentBoost)); - writer.WriteAttributeNameAndValue(kMapGainMapMax, log2(metadata.maxContentBoost)); - writer.WriteAttributeNameAndValue(kMapGamma, metadata.gamma); - writer.WriteAttributeNameAndValue(kMapOffsetSdr, metadata.offsetSdr); - writer.WriteAttributeNameAndValue(kMapOffsetHdr, metadata.offsetHdr); - writer.WriteAttributeNameAndValue(kMapHDRCapacityMin, log2(metadata.hdrCapacityMin)); - writer.WriteAttributeNameAndValue(kMapHDRCapacityMax, log2(metadata.hdrCapacityMax)); - writer.WriteAttributeNameAndValue(kMapBaseRenditionIsHDR, "False"); - writer.FinishWriting(); - - return ss.str(); -} - -} // namespace ultrahdr diff --git a/lib/jpegrutils.h b/lib/jpegrutils.h deleted file mode 100644 index c3fa8ab..0000000 --- a/lib/jpegrutils.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ULTRAHDR_JPEGRUTILS_H -#define ULTRAHDR_JPEGRUTILS_H - -#include "ultrahdr.h" -#include "jpegr.h" - -namespace ultrahdr { - -static constexpr uint32_t EndianSwap32(uint32_t value) { - return ((value & 0xFF) << 24) | ((value & 0xFF00) << 8) | ((value & 0xFF0000) >> 8) | - (value >> 24); -} -static inline uint16_t EndianSwap16(uint16_t value) { - return static_cast((value >> 8) | ((value & 0xFF) << 8)); -} - -struct ultrahdr_metadata_struct; -/* - * Mutable data structure. Holds information for metadata. - */ -class DataStruct { - private: - void* data; - int writePos; - int length; - - public: - DataStruct(int s); - ~DataStruct(); - - void* getData(); - int getLength(); - int getBytesWritten(); - bool write8(uint8_t value); - bool write16(uint16_t value); - bool write32(uint32_t value); - bool write(const void* src, int size); -}; - -/* - * Helper function used for writing data to destination. - * - * @param destination destination of the data to be written. - * @param source source of data being written. - * @param length length of the data to be written. - * @param position cursor in desitination where the data is to be written. - * @return status of succeed or error code. 
- */ -status_t Write(jr_compressed_ptr destination, const void* source, size_t length, int& position); - -/* - * Parses XMP packet and fills metadata with data from XMP - * - * @param xmp_data pointer to XMP packet - * @param xmp_size size of XMP packet - * @param metadata place to store HDR metadata values - * @return true if metadata is successfully retrieved, false otherwise - */ -bool getMetadataFromXMP(uint8_t* xmp_data, size_t xmp_size, ultrahdr_metadata_struct* metadata); - -/* - * This method generates XMP metadata for the primary image. - * - * below is an example of the XMP metadata that this function generates where - * secondary_image_length = 1000 - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * @param secondary_image_length length of secondary image - * @return XMP metadata in type of string - */ -std::string generateXmpForPrimaryImage(int secondary_image_length, - ultrahdr_metadata_struct& metadata); - -/* - * This method generates XMP metadata for the recovery map image. - * - * below is an example of the XMP metadata that this function generates where - * max_content_boost = 8.0 - * min_content_boost = 0.5 - * - * - * - * - * - * - * - * @param metadata JPEG/R metadata to encode as XMP - * @return XMP metadata in type of string - */ -std::string generateXmpForSecondaryImage(ultrahdr_metadata_struct& metadata); -} // namespace ultrahdr - -#endif // ULTRAHDR_JPEGRUTILS_H diff --git a/lib/multipictureformat.cpp b/lib/multipictureformat.cpp deleted file mode 100644 index dc31fbb..0000000 --- a/lib/multipictureformat.cpp +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2023 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "multipictureformat.h" - -namespace ultrahdr { -size_t calculateMpfSize() { - return sizeof(kMpfSig) + // Signature - kMpEndianSize + // Endianness - sizeof(uint32_t) + // Index IFD Offset - sizeof(uint16_t) + // Tag count - kTagSerializedCount * kTagSize + // 3 tags at 12 bytes each - sizeof(uint32_t) + // Attribute IFD offset - kNumPictures * kMPEntrySize; // MP Entries for each image -} - -std::shared_ptr generateMpf(int primary_image_size, int primary_image_offset, - int secondary_image_size, int secondary_image_offset) { - size_t mpf_size = calculateMpfSize(); - std::shared_ptr dataStruct = std::make_shared(mpf_size); - - dataStruct->write(static_cast(kMpfSig), sizeof(kMpfSig)); -#if USE_BIG_ENDIAN_IN_MPF - dataStruct->write(static_cast(kMpBigEndian), kMpEndianSize); -#else - dataStruct->write(static_cast(kMpLittleEndian), kMpEndianSize); -#endif - - // Set the Index IFD offset be the position after the endianness value and this offset. - constexpr uint32_t indexIfdOffset = static_cast(kMpEndianSize + sizeof(kMpfSig)); - dataStruct->write32(Endian_SwapBE32(indexIfdOffset)); - - // We will write 3 tags (version, number of images, MP entries). - dataStruct->write16(Endian_SwapBE16(kTagSerializedCount)); - - // Write the version tag. 
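// Each Index IFD entry written below is kTagSize (12) bytes: a 2-byte tag, a 2-byte type,
// a 4-byte count, and a 4-byte value or offset.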
- dataStruct->write16(Endian_SwapBE16(kVersionTag)); - dataStruct->write16(Endian_SwapBE16(kVersionType)); - dataStruct->write32(Endian_SwapBE32(kVersionCount)); - dataStruct->write(kVersionExpected, kVersionSize); - - // Write the number of images. - dataStruct->write16(Endian_SwapBE16(kNumberOfImagesTag)); - dataStruct->write16(Endian_SwapBE16(kNumberOfImagesType)); - dataStruct->write32(Endian_SwapBE32(kNumberOfImagesCount)); - dataStruct->write32(Endian_SwapBE32(kNumPictures)); - - // Write the MP entries. - dataStruct->write16(Endian_SwapBE16(kMPEntryTag)); - dataStruct->write16(Endian_SwapBE16(kMPEntryType)); - dataStruct->write32(Endian_SwapBE32(kMPEntrySize * kNumPictures)); - const uint32_t mpEntryOffset = - static_cast(dataStruct->getBytesWritten() - // The bytes written so far - sizeof(kMpfSig) + // Excluding the MPF signature - sizeof(uint32_t) + // The 4 bytes for this offset - sizeof(uint32_t)); // The 4 bytes for the attribute IFD offset. - dataStruct->write32(Endian_SwapBE32(mpEntryOffset)); - - // Write the attribute IFD offset (zero because we don't write it). - dataStruct->write32(0); - - // Write the MP entries for primary image - dataStruct->write32(Endian_SwapBE32(kMPEntryAttributeFormatJpeg | kMPEntryAttributeTypePrimary)); - dataStruct->write32(Endian_SwapBE32(primary_image_size)); - dataStruct->write32(Endian_SwapBE32(primary_image_offset)); - dataStruct->write16(0); - dataStruct->write16(0); - - // Write the MP entries for secondary image - dataStruct->write32(Endian_SwapBE32(kMPEntryAttributeFormatJpeg)); - dataStruct->write32(Endian_SwapBE32(secondary_image_size)); - dataStruct->write32(Endian_SwapBE32(secondary_image_offset)); - dataStruct->write16(0); - dataStruct->write16(0); - - return dataStruct; -} - -} // namespace ultrahdr diff --git a/lib/multipictureformat.h b/lib/multipictureformat.h deleted file mode 100644 index 888f233..0000000 --- a/lib/multipictureformat.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2022 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ULTRAHDR_MULTIPICTUREFORMAT_H -#define ULTRAHDR_MULTIPICTUREFORMAT_H - -#include - -#ifndef USE_BIG_ENDIAN_IN_MPF -#define USE_BIG_ENDIAN_IN_MPF true -#endif - -#undef Endian_SwapBE32 -#undef Endian_SwapBE16 -#if USE_BIG_ENDIAN_IN_MPF -#define Endian_SwapBE32(n) EndianSwap32(n) -#define Endian_SwapBE16(n) EndianSwap16(n) -#else -#define Endian_SwapBE32(n) (n) -#define Endian_SwapBE16(n) (n) -#endif - -#include "ultrahdr.h" -#include "jpegr.h" -#include "gainmapmath.h" -#include "jpegrutils.h" - -namespace ultrahdr { - -constexpr size_t kNumPictures = 2; -constexpr size_t kMpEndianSize = 4; -constexpr uint16_t kTagSerializedCount = 3; -constexpr uint32_t kTagSize = 12; - -constexpr uint16_t kTypeLong = 0x4; -constexpr uint16_t kTypeUndefined = 0x7; - -static constexpr uint8_t kMpfSig[] = {'M', 'P', 'F', '\0'}; -constexpr uint8_t kMpLittleEndian[kMpEndianSize] = {0x49, 0x49, 0x2A, 0x00}; -constexpr uint8_t kMpBigEndian[kMpEndianSize] = {0x4D, 0x4D, 0x00, 0x2A}; - -constexpr uint16_t kVersionTag = 0xB000; -constexpr uint16_t kVersionType = kTypeUndefined; -constexpr uint32_t kVersionCount = 4; -constexpr size_t kVersionSize = 4; -constexpr uint8_t kVersionExpected[kVersionSize] = {'0', '1', '0', '0'}; - -constexpr uint16_t kNumberOfImagesTag = 0xB001; -constexpr uint16_t kNumberOfImagesType = kTypeLong; -constexpr uint32_t kNumberOfImagesCount = 1; - -constexpr uint16_t kMPEntryTag = 0xB002; -constexpr uint16_t kMPEntryType = kTypeUndefined; -constexpr uint32_t kMPEntrySize = 16; - -constexpr uint32_t kMPEntryAttributeFormatJpeg = 0x0000000; -constexpr uint32_t kMPEntryAttributeTypePrimary = 0x030000; - -size_t calculateMpfSize(); -std::shared_ptr generateMpf(int primary_image_size, int primary_image_offset, - int secondary_image_size, int secondary_image_offset); - -} // namespace ultrahdr - -#endif // ULTRAHDR_MULTIPICTUREFORMAT_H diff --git a/lib/src/gainmapmath.cpp b/lib/src/gainmapmath.cpp new file mode 100644 index 0000000..8ace1e7 --- /dev/null +++ b/lib/src/gainmapmath.cpp @@ -0,0 +1,733 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ultrahdr/gainmapmath.h" + +namespace ultrahdr { + +static const std::vector kPqOETF = [] { + std::vector result; + for (size_t idx = 0; idx < kPqOETFNumEntries; idx++) { + float value = static_cast(idx) / static_cast(kPqOETFNumEntries - 1); + result.push_back(pqOetf(value)); + } + return result; +}(); + +static const std::vector kPqInvOETF = [] { + std::vector result; + for (size_t idx = 0; idx < kPqInvOETFNumEntries; idx++) { + float value = static_cast(idx) / static_cast(kPqInvOETFNumEntries - 1); + result.push_back(pqInvOetf(value)); + } + return result; +}(); + +static const std::vector kHlgOETF = [] { + std::vector result; + for (size_t idx = 0; idx < kHlgOETFNumEntries; idx++) { + float value = static_cast(idx) / static_cast(kHlgOETFNumEntries - 1); + result.push_back(hlgOetf(value)); + } + return result; +}(); + +static const std::vector kHlgInvOETF = [] { + std::vector result; + for (size_t idx = 0; idx < kHlgInvOETFNumEntries; idx++) { + float value = static_cast(idx) / static_cast(kHlgInvOETFNumEntries - 1); + result.push_back(hlgInvOetf(value)); + } + return result; +}(); + +static const std::vector kSrgbInvOETF = [] { + std::vector result; + for (size_t idx = 0; idx < kSrgbInvOETFNumEntries; idx++) { + float value = static_cast(idx) / static_cast(kSrgbInvOETFNumEntries - 1); + result.push_back(srgbInvOetf(value)); + } + return result; +}(); + +// Use Shepard's method for inverse distance weighting. For more information: +// en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method + +float ShepardsIDW::euclideanDistance(float x1, float x2, float y1, float y2) { + return sqrt(((y2 - y1) * (y2 - y1)) + (x2 - x1) * (x2 - x1)); +} + +void ShepardsIDW::fillShepardsIDW(float* weights, int incR, int incB) { + for (int y = 0; y < mMapScaleFactor; y++) { + for (int x = 0; x < mMapScaleFactor; x++) { + float pos_x = ((float)x) / mMapScaleFactor; + float pos_y = ((float)y) / mMapScaleFactor; + int curr_x = floor(pos_x); + int curr_y = floor(pos_y); + int next_x = curr_x + incR; + int next_y = curr_y + incB; + float e1_distance = euclideanDistance(pos_x, curr_x, pos_y, curr_y); + int index = y * mMapScaleFactor * 4 + x * 4; + if (e1_distance == 0) { + weights[index++] = 1.f; + weights[index++] = 0.f; + weights[index++] = 0.f; + weights[index++] = 0.f; + } else { + float e1_weight = 1.f / e1_distance; + + float e2_distance = euclideanDistance(pos_x, curr_x, pos_y, next_y); + float e2_weight = 1.f / e2_distance; + + float e3_distance = euclideanDistance(pos_x, next_x, pos_y, curr_y); + float e3_weight = 1.f / e3_distance; + + float e4_distance = euclideanDistance(pos_x, next_x, pos_y, next_y); + float e4_weight = 1.f / e4_distance; + + float total_weight = e1_weight + e2_weight + e3_weight + e4_weight; + + weights[index++] = e1_weight / total_weight; + weights[index++] = e2_weight / total_weight; + weights[index++] = e3_weight / total_weight; + weights[index++] = e4_weight / total_weight; + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// sRGB transformations + +static const float kMaxPixelFloat = 1.0f; +static float clampPixelFloat(float value) { + return (value < 0.0f) ? 0.0f : (value > kMaxPixelFloat) ? kMaxPixelFloat : value; +} + +// See IEC 61966-2-1/Amd 1:2003, Equation F.7. +static const float kSrgbR = 0.2126f, kSrgbG = 0.7152f, kSrgbB = 0.0722f; + +float srgbLuminance(Color e) { return kSrgbR * e.r + kSrgbG * e.g + kSrgbB * e.b; } + +// See ITU-R BT.709-6, Section 3. 
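Returning to the ShepardsIDW table filled above: for each sub-pixel position inside a gain-map cell, the four stored weights are the normalized inverse distances to the surrounding map samples, with a degenerate case when the position coincides with the nearest sample. A standalone sketch of that computation, assuming incR = incB = 1 (the function is illustrative, not the library's API):

    #include <array>
    #include <cmath>

    std::array<float, 4> shepardWeightsSketch(float pos_x, float pos_y) {
      const float d1 = std::hypot(pos_x - 0.f, pos_y - 0.f);  // nearest sample (curr_x, curr_y)
      if (d1 == 0.f) return {1.f, 0.f, 0.f, 0.f};             // exactly on the sample
      const float d2 = std::hypot(pos_x - 0.f, pos_y - 1.f);  // (curr_x, next_y)
      const float d3 = std::hypot(pos_x - 1.f, pos_y - 0.f);  // (next_x, curr_y)
      const float d4 = std::hypot(pos_x - 1.f, pos_y - 1.f);  // (next_x, next_y)
      const float w1 = 1.f / d1, w2 = 1.f / d2, w3 = 1.f / d3, w4 = 1.f / d4;
      const float total = w1 + w2 + w3 + w4;
      return {w1 / total, w2 / total, w3 / total, w4 / total};
    }

For example, shepardWeightsSketch(0.25f, 0.25f) returns roughly {0.45, 0.20, 0.20, 0.15}: the nearest sample dominates but never fully excludes its neighbours.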
+// Uses the same coefficients for deriving luma signal as +// IEC 61966-2-1/Amd 1:2003 states for luminance, so we reuse the luminance +// function above. +static const float kSrgbCb = 1.8556f, kSrgbCr = 1.5748f; + +Color srgbRgbToYuv(Color e_gamma) { + float y_gamma = srgbLuminance(e_gamma); + return {{{y_gamma, (e_gamma.b - y_gamma) / kSrgbCb, (e_gamma.r - y_gamma) / kSrgbCr}}}; +} + +// See ITU-R BT.709-6, Section 3. +// Same derivation to BT.2100's YUV->RGB, below. Similar to srgbRgbToYuv, we +// can reuse the luminance coefficients since they are the same. +static const float kSrgbGCb = kSrgbB * kSrgbCb / kSrgbG; +static const float kSrgbGCr = kSrgbR * kSrgbCr / kSrgbG; + +Color srgbYuvToRgb(Color e_gamma) { + return {{{clampPixelFloat(e_gamma.y + kSrgbCr * e_gamma.v), + clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v), + clampPixelFloat(e_gamma.y + kSrgbCb * e_gamma.u)}}}; +} + +// See IEC 61966-2-1/Amd 1:2003, Equations F.5 and F.6. +float srgbInvOetf(float e_gamma) { + if (e_gamma <= 0.04045f) { + return e_gamma / 12.92f; + } else { + return pow((e_gamma + 0.055f) / 1.055f, 2.4); + } +} + +Color srgbInvOetf(Color e_gamma) { + return {{{srgbInvOetf(e_gamma.r), srgbInvOetf(e_gamma.g), srgbInvOetf(e_gamma.b)}}}; +} + +// See IEC 61966-2-1, Equations F.5 and F.6. +float srgbInvOetfLUT(float e_gamma) { + uint32_t value = static_cast(e_gamma * (kSrgbInvOETFNumEntries - 1) + 0.5); + // TODO() : Remove once conversion modules have appropriate clamping in place + value = CLIP3(value, 0, kSrgbInvOETFNumEntries - 1); + return kSrgbInvOETF[value]; +} + +Color srgbInvOetfLUT(Color e_gamma) { + return {{{srgbInvOetfLUT(e_gamma.r), srgbInvOetfLUT(e_gamma.g), srgbInvOetfLUT(e_gamma.b)}}}; +} + +//////////////////////////////////////////////////////////////////////////////// +// Display-P3 transformations + +// See SMPTE EG 432-1, Equation 7-8. +static const float kP3R = 0.20949f, kP3G = 0.72160f, kP3B = 0.06891f; + +float p3Luminance(Color e) { return kP3R * e.r + kP3G * e.g + kP3B * e.b; } + +// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2. +// Unfortunately, calculation of luma signal differs from calculation of +// luminance for Display-P3, so we can't reuse p3Luminance here. +static const float kP3YR = 0.299f, kP3YG = 0.587f, kP3YB = 0.114f; +static const float kP3Cb = 1.772f, kP3Cr = 1.402f; + +Color p3RgbToYuv(Color e_gamma) { + float y_gamma = kP3YR * e_gamma.r + kP3YG * e_gamma.g + kP3YB * e_gamma.b; + return {{{y_gamma, (e_gamma.b - y_gamma) / kP3Cb, (e_gamma.r - y_gamma) / kP3Cr}}}; +} + +// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2. +// Same derivation to BT.2100's YUV->RGB, below. Similar to p3RgbToYuv, we must +// use luma signal coefficients rather than the luminance coefficients. +static const float kP3GCb = kP3YB * kP3Cb / kP3YG; +static const float kP3GCr = kP3YR * kP3Cr / kP3YG; + +Color p3YuvToRgb(Color e_gamma) { + return {{{clampPixelFloat(e_gamma.y + kP3Cr * e_gamma.v), + clampPixelFloat(e_gamma.y - kP3GCb * e_gamma.u - kP3GCr * e_gamma.v), + clampPixelFloat(e_gamma.y + kP3Cb * e_gamma.u)}}}; +} + +//////////////////////////////////////////////////////////////////////////////// +// BT.2100 transformations - according to ITU-R BT.2100-2 + +// See ITU-R BT.2100-2, Table 5, HLG Reference OOTF +static const float kBt2100R = 0.2627f, kBt2100G = 0.6780f, kBt2100B = 0.0593f; + +float bt2100Luminance(Color e) { return kBt2100R * e.r + kBt2100G * e.g + kBt2100B * e.b; } + +// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals. 
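The srgbInvOetfLUT lookup above (and the HLG/PQ LUT variants later in this file) all use the same index mapping: scale the input into the table range, round to the nearest entry, and clamp. A minimal sketch with the table size left as a parameter, since the k*NumEntries constants live in gainmapmath.h rather than in this hunk:

    #include <cstdint>

    inline uint32_t oetfLutIndexSketch(float value, uint32_t numEntries) {
      float idx = value * (numEntries - 1) + 0.5f;  // round to the nearest entry
      if (idx < 0.f) idx = 0.f;                     // clamp, as CLIP3 does above
      if (idx > static_cast<float>(numEntries - 1)) idx = static_cast<float>(numEntries - 1);
      return static_cast<uint32_t>(idx);
    }

For an illustrative 1024-entry table, value = 0.5f maps to index 512, whose stored entry approximates pow((0.5 + 0.055) / 1.055, 2.4) ≈ 0.214 for the sRGB inverse OETF.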
+// BT.2100 uses the same coefficients for calculating luma signal and luminance, +// so we reuse the luminance function here. +static const float kBt2100Cb = 1.8814f, kBt2100Cr = 1.4746f; + +Color bt2100RgbToYuv(Color e_gamma) { + float y_gamma = bt2100Luminance(e_gamma); + return {{{y_gamma, (e_gamma.b - y_gamma) / kBt2100Cb, (e_gamma.r - y_gamma) / kBt2100Cr}}}; +} + +// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals. +// +// Similar to bt2100RgbToYuv above, we can reuse the luminance coefficients. +// +// Derived by inversing bt2100RgbToYuv. The derivation for R and B are pretty +// straight forward; we just invert the formulas for U and V above. But deriving +// the formula for G is a bit more complicated: +// +// Start with equation for luminance: +// Y = kBt2100R * R + kBt2100G * G + kBt2100B * B +// Solve for G: +// G = (Y - kBt2100R * R - kBt2100B * B) / kBt2100B +// Substitute equations for R and B in terms YUV: +// G = (Y - kBt2100R * (Y + kBt2100Cr * V) - kBt2100B * (Y + kBt2100Cb * U)) / kBt2100B +// Simplify: +// G = Y * ((1 - kBt2100R - kBt2100B) / kBt2100G) +// + U * (kBt2100B * kBt2100Cb / kBt2100G) +// + V * (kBt2100R * kBt2100Cr / kBt2100G) +// +// We then get the following coeficients for calculating G from YUV: +// +// Coef for Y = (1 - kBt2100R - kBt2100B) / kBt2100G = 1 +// Coef for U = kBt2100B * kBt2100Cb / kBt2100G = kBt2100GCb = ~0.1645 +// Coef for V = kBt2100R * kBt2100Cr / kBt2100G = kBt2100GCr = ~0.5713 + +static const float kBt2100GCb = kBt2100B * kBt2100Cb / kBt2100G; +static const float kBt2100GCr = kBt2100R * kBt2100Cr / kBt2100G; + +Color bt2100YuvToRgb(Color e_gamma) { + return {{{clampPixelFloat(e_gamma.y + kBt2100Cr * e_gamma.v), + clampPixelFloat(e_gamma.y - kBt2100GCb * e_gamma.u - kBt2100GCr * e_gamma.v), + clampPixelFloat(e_gamma.y + kBt2100Cb * e_gamma.u)}}}; +} + +// See ITU-R BT.2100-2, Table 5, HLG Reference OETF. +static const float kHlgA = 0.17883277f, kHlgB = 0.28466892f, kHlgC = 0.55991073; + +float hlgOetf(float e) { + if (e <= 1.0f / 12.0f) { + return sqrt(3.0f * e); + } else { + return kHlgA * log(12.0f * e - kHlgB) + kHlgC; + } +} + +Color hlgOetf(Color e) { return {{{hlgOetf(e.r), hlgOetf(e.g), hlgOetf(e.b)}}}; } + +float hlgOetfLUT(float e) { + uint32_t value = static_cast(e * (kHlgOETFNumEntries - 1) + 0.5); + // TODO() : Remove once conversion modules have appropriate clamping in place + value = CLIP3(value, 0, kHlgOETFNumEntries - 1); + + return kHlgOETF[value]; +} + +Color hlgOetfLUT(Color e) { return {{{hlgOetfLUT(e.r), hlgOetfLUT(e.g), hlgOetfLUT(e.b)}}}; } + +// See ITU-R BT.2100-2, Table 5, HLG Reference EOTF. +float hlgInvOetf(float e_gamma) { + if (e_gamma <= 0.5f) { + return pow(e_gamma, 2.0f) / 3.0f; + } else { + return (exp((e_gamma - kHlgC) / kHlgA) + kHlgB) / 12.0f; + } +} + +Color hlgInvOetf(Color e_gamma) { + return {{{hlgInvOetf(e_gamma.r), hlgInvOetf(e_gamma.g), hlgInvOetf(e_gamma.b)}}}; +} + +float hlgInvOetfLUT(float e_gamma) { + uint32_t value = static_cast(e_gamma * (kHlgInvOETFNumEntries - 1) + 0.5); + // TODO() : Remove once conversion modules have appropriate clamping in place + value = CLIP3(value, 0, kHlgInvOETFNumEntries - 1); + + return kHlgInvOETF[value]; +} + +Color hlgInvOetfLUT(Color e_gamma) { + return {{{hlgInvOetfLUT(e_gamma.r), hlgInvOetfLUT(e_gamma.g), hlgInvOetfLUT(e_gamma.b)}}}; +} + +// See ITU-R BT.2100-2, Table 4, Reference PQ OETF. 
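A quick numeric confirmation of the G-coefficient derivation above, reproducing the ~0.1645 and ~0.5713 values quoted in the comment (a standalone sketch, not part of the patch):

    constexpr float kR = 0.2627f, kG = 0.6780f, kB = 0.0593f;
    constexpr float kCb = 1.8814f, kCr = 1.4746f;
    constexpr float coefY = (1.f - kR - kB) / kG;  // 0.6780 / 0.6780 = 1.0
    constexpr float coefU = kB * kCb / kG;         // ≈ 0.1645
    constexpr float coefV = kR * kCr / kG;         // ≈ 0.5713
    static_assert(coefY > 0.999f && coefY < 1.001f, "the Y coefficient reduces to 1");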
+static const float kPqM1 = 2610.0f / 16384.0f, kPqM2 = 2523.0f / 4096.0f * 128.0f; +static const float kPqC1 = 3424.0f / 4096.0f, kPqC2 = 2413.0f / 4096.0f * 32.0f, + kPqC3 = 2392.0f / 4096.0f * 32.0f; + +float pqOetf(float e) { + if (e <= 0.0f) return 0.0f; + return pow((kPqC1 + kPqC2 * pow(e, kPqM1)) / (1 + kPqC3 * pow(e, kPqM1)), kPqM2); +} + +Color pqOetf(Color e) { return {{{pqOetf(e.r), pqOetf(e.g), pqOetf(e.b)}}}; } + +float pqOetfLUT(float e) { + uint32_t value = static_cast(e * (kPqOETFNumEntries - 1) + 0.5); + // TODO() : Remove once conversion modules have appropriate clamping in place + value = CLIP3(value, 0, kPqOETFNumEntries - 1); + + return kPqOETF[value]; +} + +Color pqOetfLUT(Color e) { return {{{pqOetfLUT(e.r), pqOetfLUT(e.g), pqOetfLUT(e.b)}}}; } + +// Derived from the inverse of the Reference PQ OETF. +static const float kPqInvA = 128.0f, kPqInvB = 107.0f, kPqInvC = 2413.0f, kPqInvD = 2392.0f, + kPqInvE = 6.2773946361f, kPqInvF = 0.0126833f; + +float pqInvOetf(float e_gamma) { + // This equation blows up if e_gamma is 0.0, and checking on <= 0.0 doesn't + // always catch 0.0. So, check on 0.0001, since anything this small will + // effectively be crushed to zero anyways. + if (e_gamma <= 0.0001f) return 0.0f; + return pow( + (kPqInvA * pow(e_gamma, kPqInvF) - kPqInvB) / (kPqInvC - kPqInvD * pow(e_gamma, kPqInvF)), + kPqInvE); +} + +Color pqInvOetf(Color e_gamma) { + return {{{pqInvOetf(e_gamma.r), pqInvOetf(e_gamma.g), pqInvOetf(e_gamma.b)}}}; +} + +float pqInvOetfLUT(float e_gamma) { + uint32_t value = static_cast(e_gamma * (kPqInvOETFNumEntries - 1) + 0.5); + // TODO() : Remove once conversion modules have appropriate clamping in place + value = CLIP3(value, 0, kPqInvOETFNumEntries - 1); + + return kPqInvOETF[value]; +} + +Color pqInvOetfLUT(Color e_gamma) { + return {{{pqInvOetfLUT(e_gamma.r), pqInvOetfLUT(e_gamma.g), pqInvOetfLUT(e_gamma.b)}}}; +} + +//////////////////////////////////////////////////////////////////////////////// +// Color conversions + +Color bt709ToP3(Color e) { + return {{{0.82254f * e.r + 0.17755f * e.g + 0.00006f * e.b, + 0.03312f * e.r + 0.96684f * e.g + -0.00001f * e.b, + 0.01706f * e.r + 0.07240f * e.g + 0.91049f * e.b}}}; +} + +Color bt709ToBt2100(Color e) { + return {{{0.62740f * e.r + 0.32930f * e.g + 0.04332f * e.b, + 0.06904f * e.r + 0.91958f * e.g + 0.01138f * e.b, + 0.01636f * e.r + 0.08799f * e.g + 0.89555f * e.b}}}; +} + +Color p3ToBt709(Color e) { + return {{{1.22482f * e.r + -0.22490f * e.g + -0.00007f * e.b, + -0.04196f * e.r + 1.04199f * e.g + 0.00001f * e.b, + -0.01961f * e.r + -0.07865f * e.g + 1.09831f * e.b}}}; +} + +Color p3ToBt2100(Color e) { + return {{{0.75378f * e.r + 0.19862f * e.g + 0.04754f * e.b, + 0.04576f * e.r + 0.94177f * e.g + 0.01250f * e.b, + -0.00121f * e.r + 0.01757f * e.g + 0.98359f * e.b}}}; +} + +Color bt2100ToBt709(Color e) { + return {{{1.66045f * e.r + -0.58764f * e.g + -0.07286f * e.b, + -0.12445f * e.r + 1.13282f * e.g + -0.00837f * e.b, + -0.01811f * e.r + -0.10057f * e.g + 1.11878f * e.b}}}; +} + +Color bt2100ToP3(Color e) { + return {{{1.34369f * e.r + -0.28223f * e.g + -0.06135f * e.b, + -0.06533f * e.r + 1.07580f * e.g + -0.01051f * e.b, + 0.00283f * e.r + -0.01957f * e.g + 1.01679f * e.b}}}; +} + +// TODO: confirm we always want to convert like this before calculating +// luminance. 
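One property of the gamut conversion matrices above worth noting: each row sums to approximately 1, so reference white (1, 1, 1) maps to reference white up to the rounding of the published coefficients. A minimal sketch (the Rgb struct is illustrative, not the library's Color type):

    struct Rgb { float r, g, b; };

    inline Rgb bt709ToBt2100Sketch(Rgb e) {
      return {0.62740f * e.r + 0.32930f * e.g + 0.04332f * e.b,
              0.06904f * e.r + 0.91958f * e.g + 0.01138f * e.b,
              0.01636f * e.r + 0.08799f * e.g + 0.89555f * e.b};
    }

    // bt709ToBt2100Sketch({1.f, 1.f, 1.f}) ≈ {1.00002f, 1.00000f, 0.99990f}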
+ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, + ultrahdr_color_gamut hdr_gamut) { + switch (sdr_gamut) { + case ULTRAHDR_COLORGAMUT_BT709: + switch (hdr_gamut) { + case ULTRAHDR_COLORGAMUT_BT709: + return identityConversion; + case ULTRAHDR_COLORGAMUT_P3: + return p3ToBt709; + case ULTRAHDR_COLORGAMUT_BT2100: + return bt2100ToBt709; + case ULTRAHDR_COLORGAMUT_UNSPECIFIED: + return nullptr; + } + break; + case ULTRAHDR_COLORGAMUT_P3: + switch (hdr_gamut) { + case ULTRAHDR_COLORGAMUT_BT709: + return bt709ToP3; + case ULTRAHDR_COLORGAMUT_P3: + return identityConversion; + case ULTRAHDR_COLORGAMUT_BT2100: + return bt2100ToP3; + case ULTRAHDR_COLORGAMUT_UNSPECIFIED: + return nullptr; + } + break; + case ULTRAHDR_COLORGAMUT_BT2100: + switch (hdr_gamut) { + case ULTRAHDR_COLORGAMUT_BT709: + return bt709ToBt2100; + case ULTRAHDR_COLORGAMUT_P3: + return p3ToBt2100; + case ULTRAHDR_COLORGAMUT_BT2100: + return identityConversion; + case ULTRAHDR_COLORGAMUT_UNSPECIFIED: + return nullptr; + } + break; + case ULTRAHDR_COLORGAMUT_UNSPECIFIED: + return nullptr; + } + return nullptr; +} + +// All of these conversions are derived from the respective input YUV->RGB conversion followed by +// the RGB->YUV for the receiving encoding. They are consistent with the RGB<->YUV functions in this +// file, given that we uses BT.709 encoding for sRGB and BT.601 encoding for Display-P3, to match +// DataSpace. + +Color yuv709To601(Color e_gamma) { + return {{{1.0f * e_gamma.y + 0.101579f * e_gamma.u + 0.196076f * e_gamma.v, + 0.0f * e_gamma.y + 0.989854f * e_gamma.u + -0.110653f * e_gamma.v, + 0.0f * e_gamma.y + -0.072453f * e_gamma.u + 0.983398f * e_gamma.v}}}; +} + +Color yuv709To2100(Color e_gamma) { + return {{{1.0f * e_gamma.y + -0.016969f * e_gamma.u + 0.096312f * e_gamma.v, + 0.0f * e_gamma.y + 0.995306f * e_gamma.u + -0.051192f * e_gamma.v, + 0.0f * e_gamma.y + 0.011507f * e_gamma.u + 1.002637f * e_gamma.v}}}; +} + +Color yuv601To709(Color e_gamma) { + return {{{1.0f * e_gamma.y + -0.118188f * e_gamma.u + -0.212685f * e_gamma.v, + 0.0f * e_gamma.y + 1.018640f * e_gamma.u + 0.114618f * e_gamma.v, + 0.0f * e_gamma.y + 0.075049f * e_gamma.u + 1.025327f * e_gamma.v}}}; +} + +Color yuv601To2100(Color e_gamma) { + return {{{1.0f * e_gamma.y + -0.128245f * e_gamma.u + -0.115879f * e_gamma.v, + 0.0f * e_gamma.y + 1.010016f * e_gamma.u + 0.061592f * e_gamma.v, + 0.0f * e_gamma.y + 0.086969f * e_gamma.u + 1.029350f * e_gamma.v}}}; +} + +Color yuv2100To709(Color e_gamma) { + return {{{1.0f * e_gamma.y + 0.018149f * e_gamma.u + -0.095132f * e_gamma.v, + 0.0f * e_gamma.y + 1.004123f * e_gamma.u + 0.051267f * e_gamma.v, + 0.0f * e_gamma.y + -0.011524f * e_gamma.u + 0.996782f * e_gamma.v}}}; +} + +Color yuv2100To601(Color e_gamma) { + return {{{1.0f * e_gamma.y + 0.117887f * e_gamma.u + 0.105521f * e_gamma.v, + 0.0f * e_gamma.y + 0.995211f * e_gamma.u + -0.059549f * e_gamma.v, + 0.0f * e_gamma.y + -0.084085f * e_gamma.u + 0.976518f * e_gamma.v}}}; +} + +void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma, + ColorTransformFn fn) { + Color yuv1 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2); + Color yuv2 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2); + Color yuv3 = getYuv420Pixel(image, x_chroma * 2, y_chroma * 2 + 1); + Color yuv4 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 + 1); + + yuv1 = fn(yuv1); + yuv2 = fn(yuv2); + yuv3 = fn(yuv3); + yuv4 = fn(yuv4); + + Color new_uv = (yuv1 + yuv2 + yuv3 + yuv4) / 4.0f; + + size_t pixel_y1_idx = x_chroma * 2 + 
y_chroma * 2 * image->luma_stride; + size_t pixel_y2_idx = (x_chroma * 2 + 1) + y_chroma * 2 * image->luma_stride; + size_t pixel_y3_idx = x_chroma * 2 + (y_chroma * 2 + 1) * image->luma_stride; + size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->luma_stride; + + uint8_t& y1_uint = reinterpret_cast(image->data)[pixel_y1_idx]; + uint8_t& y2_uint = reinterpret_cast(image->data)[pixel_y2_idx]; + uint8_t& y3_uint = reinterpret_cast(image->data)[pixel_y3_idx]; + uint8_t& y4_uint = reinterpret_cast(image->data)[pixel_y4_idx]; + + size_t pixel_count = image->chroma_stride * image->height / 2; + size_t pixel_uv_idx = x_chroma + y_chroma * (image->chroma_stride); + + uint8_t& u_uint = reinterpret_cast(image->chroma_data)[pixel_uv_idx]; + uint8_t& v_uint = reinterpret_cast(image->chroma_data)[pixel_count + pixel_uv_idx]; + + y1_uint = static_cast(CLIP3((yuv1.y * 255.0f + 0.5f), 0, 255)); + y2_uint = static_cast(CLIP3((yuv2.y * 255.0f + 0.5f), 0, 255)); + y3_uint = static_cast(CLIP3((yuv3.y * 255.0f + 0.5f), 0, 255)); + y4_uint = static_cast(CLIP3((yuv4.y * 255.0f + 0.5f), 0, 255)); + + u_uint = static_cast(CLIP3((new_uv.u * 255.0f + 128.0f + 0.5f), 0, 255)); + v_uint = static_cast(CLIP3((new_uv.v * 255.0f + 128.0f + 0.5f), 0, 255)); +} + +//////////////////////////////////////////////////////////////////////////////// +// Gain map calculations +uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata) { + return encodeGain(y_sdr, y_hdr, metadata, log2(metadata->minContentBoost), + log2(metadata->maxContentBoost)); +} + +uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata, + float log2MinContentBoost, float log2MaxContentBoost) { + float gain = 1.0f; + if (y_sdr > 0.0f) { + gain = y_hdr / y_sdr; + } + + if (gain < metadata->minContentBoost) gain = metadata->minContentBoost; + if (gain > metadata->maxContentBoost) gain = metadata->maxContentBoost; + + return static_cast((log2(gain) - log2MinContentBoost) / + (log2MaxContentBoost - log2MinContentBoost) * 255.0f); +} + +Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata) { + float logBoost = + log2(metadata->minContentBoost) * (1.0f - gain) + log2(metadata->maxContentBoost) * gain; + float gainFactor = exp2(logBoost); + return e * gainFactor; +} + +Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost) { + float logBoost = + log2(metadata->minContentBoost) * (1.0f - gain) + log2(metadata->maxContentBoost) * gain; + float gainFactor = exp2(logBoost * displayBoost / metadata->maxContentBoost); + return e * gainFactor; +} + +Color applyGainLUT(Color e, float gain, GainLUT& gainLUT) { + float gainFactor = gainLUT.getGainFactor(gain); + return e * gainFactor; +} + +Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y) { + uint8_t* luma_data = reinterpret_cast(image->data); + size_t luma_stride = image->luma_stride; + uint8_t* chroma_data = reinterpret_cast(image->chroma_data); + size_t chroma_stride = image->chroma_stride; + + size_t offset_cr = chroma_stride * (image->height / 2); + size_t pixel_y_idx = x + y * luma_stride; + size_t pixel_chroma_idx = x / 2 + (y / 2) * chroma_stride; + + uint8_t y_uint = luma_data[pixel_y_idx]; + uint8_t u_uint = chroma_data[pixel_chroma_idx]; + uint8_t v_uint = chroma_data[offset_cr + pixel_chroma_idx]; + + // 128 bias for UV given we are using jpeglib; see: + // https://github.com/kornelski/libjpeg/blob/master/structure.doc + return {{{static_cast(y_uint) / 255.0f, (static_cast(u_uint) - 128.0f) 
/ 255.0f, + (static_cast(v_uint) - 128.0f) / 255.0f}}}; +} + +Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y) { + uint16_t* luma_data = reinterpret_cast(image->data); + size_t luma_stride = image->luma_stride == 0 ? image->width : image->luma_stride; + uint16_t* chroma_data = reinterpret_cast(image->chroma_data); + size_t chroma_stride = image->chroma_stride; + + size_t pixel_y_idx = y * luma_stride + x; + size_t pixel_u_idx = (y >> 1) * chroma_stride + (x & ~0x1); + size_t pixel_v_idx = pixel_u_idx + 1; + + uint16_t y_uint = luma_data[pixel_y_idx] >> 6; + uint16_t u_uint = chroma_data[pixel_u_idx] >> 6; + uint16_t v_uint = chroma_data[pixel_v_idx] >> 6; + + // Conversions include taking narrow-range into account. + return {{{(static_cast(y_uint) - 64.0f) / 876.0f, + (static_cast(u_uint) - 64.0f) / 896.0f - 0.5f, + (static_cast(v_uint) - 64.0f) / 896.0f - 0.5f}}}; +} + +typedef Color (*getPixelFn)(jr_uncompressed_ptr, size_t, size_t); + +static Color samplePixels(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y, + getPixelFn get_pixel_fn) { + Color e = {{{0.0f, 0.0f, 0.0f}}}; + for (size_t dy = 0; dy < map_scale_factor; ++dy) { + for (size_t dx = 0; dx < map_scale_factor; ++dx) { + e += get_pixel_fn(image, x * map_scale_factor + dx, y * map_scale_factor + dy); + } + } + + return e / static_cast(map_scale_factor * map_scale_factor); +} + +Color sampleYuv420(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) { + return samplePixels(image, map_scale_factor, x, y, getYuv420Pixel); +} + +Color sampleP010(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) { + return samplePixels(image, map_scale_factor, x, y, getP010Pixel); +} + +// TODO: do we need something more clever for filtering either the map or images +// to generate the map? + +static size_t clamp(const size_t& val, const size_t& low, const size_t& high) { + return val < low ? low : (high < val ? high : val); +} + +static float mapUintToFloat(uint8_t map_uint) { return static_cast(map_uint) / 255.0f; } + +static float pythDistance(float x_diff, float y_diff) { + return sqrt(pow(x_diff, 2.0f) + pow(y_diff, 2.0f)); +} + +// TODO: If map_scale_factor is guaranteed to be an integer, then remove the following. +float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y) { + float x_map = static_cast(x) / map_scale_factor; + float y_map = static_cast(y) / map_scale_factor; + + size_t x_lower = static_cast(floor(x_map)); + size_t x_upper = x_lower + 1; + size_t y_lower = static_cast(floor(y_map)); + size_t y_upper = y_lower + 1; + + x_lower = clamp(x_lower, 0, map->width - 1); + x_upper = clamp(x_upper, 0, map->width - 1); + y_lower = clamp(y_lower, 0, map->height - 1); + y_upper = clamp(y_upper, 0, map->height - 1); + + // Use Shepard's method for inverse distance weighting. 
For more information: + // en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method + + float e1 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_lower * map->width]); + float e1_dist = + pythDistance(x_map - static_cast(x_lower), y_map - static_cast(y_lower)); + if (e1_dist == 0.0f) return e1; + + float e2 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_upper * map->width]); + float e2_dist = + pythDistance(x_map - static_cast(x_lower), y_map - static_cast(y_upper)); + if (e2_dist == 0.0f) return e2; + + float e3 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_lower * map->width]); + float e3_dist = + pythDistance(x_map - static_cast(x_upper), y_map - static_cast(y_lower)); + if (e3_dist == 0.0f) return e3; + + float e4 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_upper * map->width]); + float e4_dist = + pythDistance(x_map - static_cast(x_upper), y_map - static_cast(y_upper)); + if (e4_dist == 0.0f) return e2; + + float e1_weight = 1.0f / e1_dist; + float e2_weight = 1.0f / e2_dist; + float e3_weight = 1.0f / e3_dist; + float e4_weight = 1.0f / e4_dist; + float total_weight = e1_weight + e2_weight + e3_weight + e4_weight; + + return e1 * (e1_weight / total_weight) + e2 * (e2_weight / total_weight) + + e3 * (e3_weight / total_weight) + e4 * (e4_weight / total_weight); +} + +float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y, + ShepardsIDW& weightTables) { + // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the + // following by computing log2(map_scale_factor) once and then using >> log2(map_scale_factor) + size_t x_lower = x / map_scale_factor; + size_t x_upper = x_lower + 1; + size_t y_lower = y / map_scale_factor; + size_t y_upper = y_lower + 1; + + x_lower = std::min(x_lower, map->width - 1); + x_upper = std::min(x_upper, map->width - 1); + y_lower = std::min(y_lower, map->height - 1); + y_upper = std::min(y_upper, map->height - 1); + + float e1 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_lower * map->width]); + float e2 = mapUintToFloat(reinterpret_cast(map->data)[x_lower + y_upper * map->width]); + float e3 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_lower * map->width]); + float e4 = mapUintToFloat(reinterpret_cast(map->data)[x_upper + y_upper * map->width]); + + // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the + // following by using & (map_scale_factor - 1) + int offset_x = x % map_scale_factor; + int offset_y = y % map_scale_factor; + + float* weights = weightTables.mWeights; + if (x_lower == x_upper && y_lower == y_upper) + weights = weightTables.mWeightsC; + else if (x_lower == x_upper) + weights = weightTables.mWeightsNR; + else if (y_lower == y_upper) + weights = weightTables.mWeightsNB; + weights += offset_y * map_scale_factor * 4 + offset_x * 4; + + return e1 * weights[0] + e2 * weights[1] + e3 * weights[2] + e4 * weights[3]; +} + +uint32_t colorToRgba1010102(Color e_gamma) { + return (0x3ff & static_cast(e_gamma.r * 1023.0f)) | + ((0x3ff & static_cast(e_gamma.g * 1023.0f)) << 10) | + ((0x3ff & static_cast(e_gamma.b * 1023.0f)) << 20) | + (0x3 << 30); // Set alpha to 1.0 +} + +uint64_t colorToRgbaF16(Color e_gamma) { + return (uint64_t)floatToHalf(e_gamma.r) | (((uint64_t)floatToHalf(e_gamma.g)) << 16) | + (((uint64_t)floatToHalf(e_gamma.b)) << 32) | (((uint64_t)floatToHalf(1.0f)) << 48); +} + +} // namespace ultrahdr diff --git a/lib/src/icc.cpp b/lib/src/icc.cpp new file mode 100644 index 
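A hedged worked example of the gain-map encoding defined in gainmapmath.cpp above (the boost values are illustrative metadata, and the helper name is not the library's): with minContentBoost = 1.0 and maxContentBoost = 4.0, a pixel that is twice as bright in HDR as in SDR encodes to (log2(2) - log2(1)) / (log2(4) - log2(1)) * 255 = 127 after truncation.

    #include <cmath>
    #include <cstdint>

    uint8_t encodeGainSketch(float y_sdr, float y_hdr, float minBoost, float maxBoost) {
      float gain = y_sdr > 0.f ? y_hdr / y_sdr : 1.f;
      gain = std::fmin(std::fmax(gain, minBoost), maxBoost);  // clamp to the metadata range
      return static_cast<uint8_t>((std::log2(gain) - std::log2(minBoost)) /
                                  (std::log2(maxBoost) - std::log2(minBoost)) * 255.f);
    }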
0000000..b838660 --- /dev/null +++ b/lib/src/icc.cpp @@ -0,0 +1,680 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/icc.h" + +namespace ultrahdr { + +static void Matrix3x3_apply(const Matrix3x3* m, float* x) { + float y0 = x[0] * m->vals[0][0] + x[1] * m->vals[0][1] + x[2] * m->vals[0][2]; + float y1 = x[0] * m->vals[1][0] + x[1] * m->vals[1][1] + x[2] * m->vals[1][2]; + float y2 = x[0] * m->vals[2][0] + x[1] * m->vals[2][1] + x[2] * m->vals[2][2]; + x[0] = y0; + x[1] = y1; + x[2] = y2; +} + +bool Matrix3x3_invert(const Matrix3x3* src, Matrix3x3* dst) { + double a00 = src->vals[0][0]; + double a01 = src->vals[1][0]; + double a02 = src->vals[2][0]; + double a10 = src->vals[0][1]; + double a11 = src->vals[1][1]; + double a12 = src->vals[2][1]; + double a20 = src->vals[0][2]; + double a21 = src->vals[1][2]; + double a22 = src->vals[2][2]; + + double b0 = a00 * a11 - a01 * a10; + double b1 = a00 * a12 - a02 * a10; + double b2 = a01 * a12 - a02 * a11; + double b3 = a20; + double b4 = a21; + double b5 = a22; + + double determinant = b0 * b5 - b1 * b4 + b2 * b3; + + if (determinant == 0) { + return false; + } + + double invdet = 1.0 / determinant; + if (invdet > +FLT_MAX || invdet < -FLT_MAX || !isfinitef_((float)invdet)) { + return false; + } + + b0 *= invdet; + b1 *= invdet; + b2 *= invdet; + b3 *= invdet; + b4 *= invdet; + b5 *= invdet; + + dst->vals[0][0] = (float)(a11 * b5 - a12 * b4); + dst->vals[1][0] = (float)(a02 * b4 - a01 * b5); + dst->vals[2][0] = (float)(+b2); + dst->vals[0][1] = (float)(a12 * b3 - a10 * b5); + dst->vals[1][1] = (float)(a00 * b5 - a02 * b3); + dst->vals[2][1] = (float)(-b1); + dst->vals[0][2] = (float)(a10 * b4 - a11 * b3); + dst->vals[1][2] = (float)(a01 * b3 - a00 * b4); + dst->vals[2][2] = (float)(+b0); + + for (int r = 0; r < 3; ++r) + for (int c = 0; c < 3; ++c) { + if (!isfinitef_(dst->vals[r][c])) { + return false; + } + } + return true; +} + +static Matrix3x3 Matrix3x3_concat(const Matrix3x3* A, const Matrix3x3* B) { + Matrix3x3 m = {{{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}}; + for (int r = 0; r < 3; r++) + for (int c = 0; c < 3; c++) { + m.vals[r][c] = A->vals[r][0] * B->vals[0][c] + A->vals[r][1] * B->vals[1][c] + + A->vals[r][2] * B->vals[2][c]; + } + return m; +} + +static void float_XYZD50_to_grid16_lab(const float* xyz_float, uint8_t* grid16_lab) { + float v[3] = { + xyz_float[0] / kD50_x, + xyz_float[1] / kD50_y, + xyz_float[2] / kD50_z, + }; + for (size_t i = 0; i < 3; ++i) { + v[i] = v[i] > 0.008856f ? cbrtf(v[i]) : v[i] * 7.787f + (16 / 116.0f); + } + const float L = v[1] * 116.0f - 16.0f; + const float a = (v[0] - v[1]) * 500.0f; + const float b = (v[1] - v[2]) * 200.0f; + const float Lab_unorm[3] = { + L * (1 / 100.f), + (a + 128.0f) * (1 / 255.0f), + (b + 128.0f) * (1 / 255.0f), + }; + // This will encode L=1 as 0xFFFF. 
This matches how skcms will interpret the + // table, but the spec appears to indicate that the value should be 0xFF00. + // https://crbug.com/skia/13807 + for (size_t i = 0; i < 3; ++i) { + reinterpret_cast(grid16_lab)[i] = + Endian_SwapBE16(float_round_to_unorm16(Lab_unorm[i])); + } +} + +std::string IccHelper::get_desc_string(const ultrahdr_transfer_function tf, + const ultrahdr_color_gamut gamut) { + std::string result; + switch (gamut) { + case ULTRAHDR_COLORGAMUT_BT709: + result += "sRGB"; + break; + case ULTRAHDR_COLORGAMUT_P3: + result += "Display P3"; + break; + case ULTRAHDR_COLORGAMUT_BT2100: + result += "Rec2020"; + break; + default: + result += "Unknown"; + break; + } + result += " Gamut with "; + switch (tf) { + case ULTRAHDR_TF_SRGB: + result += "sRGB"; + break; + case ULTRAHDR_TF_LINEAR: + result += "Linear"; + break; + case ULTRAHDR_TF_PQ: + result += "PQ"; + break; + case ULTRAHDR_TF_HLG: + result += "HLG"; + break; + default: + result += "Unknown"; + break; + } + result += " Transfer"; + return result; +} + +std::shared_ptr IccHelper::write_text_tag(const char* text) { + uint32_t text_length = strlen(text); + uint32_t header[] = { + Endian_SwapBE32(kTAG_TextType), // Type signature + 0, // Reserved + Endian_SwapBE32(1), // Number of records + Endian_SwapBE32(12), // Record size (must be 12) + Endian_SwapBE32(SetFourByteTag('e', 'n', 'U', 'S')), // English USA + Endian_SwapBE32(2 * text_length), // Length of string in bytes + Endian_SwapBE32(28), // Offset of string + }; + + uint32_t total_length = text_length * 2 + sizeof(header); + total_length = (((total_length + 2) >> 2) << 2); // 4 aligned + std::shared_ptr dataStruct = std::make_shared(total_length); + + if (!dataStruct->write(header, sizeof(header))) { + ALOGE("write_text_tag(): error in writing data"); + return dataStruct; + } + + for (size_t i = 0; i < text_length; i++) { + // Convert ASCII to big-endian UTF-16. 
+ dataStruct->write8(0); + dataStruct->write8(text[i]); + } + + return dataStruct; +} + +std::shared_ptr IccHelper::write_xyz_tag(float x, float y, float z) { + uint32_t data[] = { + Endian_SwapBE32(kXYZ_PCSSpace), + 0, + static_cast(Endian_SwapBE32(float_round_to_fixed(x))), + static_cast(Endian_SwapBE32(float_round_to_fixed(y))), + static_cast(Endian_SwapBE32(float_round_to_fixed(z))), + }; + std::shared_ptr dataStruct = std::make_shared(sizeof(data)); + dataStruct->write(&data, sizeof(data)); + return dataStruct; +} + +std::shared_ptr IccHelper::write_trc_tag(const int table_entries, + const void* table_16) { + int total_length = 4 + 4 + 4 + table_entries * 2; + total_length = (((total_length + 2) >> 2) << 2); // 4 aligned + std::shared_ptr dataStruct = std::make_shared(total_length); + dataStruct->write32(Endian_SwapBE32(kTAG_CurveType)); // Type + dataStruct->write32(0); // Reserved + dataStruct->write32(Endian_SwapBE32(table_entries)); // Value count + for (int i = 0; i < table_entries; ++i) { + uint16_t value = reinterpret_cast(table_16)[i]; + dataStruct->write16(value); + } + return dataStruct; +} + +std::shared_ptr IccHelper::write_trc_tag(const TransferFunction& fn) { + if (fn.a == 1.f && fn.b == 0.f && fn.c == 0.f && fn.d == 0.f && fn.e == 0.f && fn.f == 0.f) { + int total_length = 16; + std::shared_ptr dataStruct = std::make_shared(total_length); + dataStruct->write32(Endian_SwapBE32(kTAG_ParaCurveType)); // Type + dataStruct->write32(0); // Reserved + dataStruct->write32(Endian_SwapBE16(kExponential_ParaCurveType)); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.g))); + return dataStruct; + } + + int total_length = 40; + std::shared_ptr dataStruct = std::make_shared(total_length); + dataStruct->write32(Endian_SwapBE32(kTAG_ParaCurveType)); // Type + dataStruct->write32(0); // Reserved + dataStruct->write32(Endian_SwapBE16(kGABCDEF_ParaCurveType)); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.g))); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.a))); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.b))); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.c))); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.d))); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.e))); + dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(fn.f))); + return dataStruct; +} + +float IccHelper::compute_tone_map_gain(const ultrahdr_transfer_function tf, float L) { + if (L <= 0.f) { + return 1.f; + } + if (tf == ULTRAHDR_TF_PQ) { + // The PQ transfer function will map to the range [0, 1]. Linearly scale + // it up to the range [0, 10,000/203]. We will then tone map that back + // down to [0, 1]. + constexpr float kInputMaxLuminance = 10000 / 203.f; + constexpr float kOutputMaxLuminance = 1.0; + L *= kInputMaxLuminance; + + // Compute the tone map gain which will tone map from 10,000/203 to 1.0. + constexpr float kToneMapA = kOutputMaxLuminance / (kInputMaxLuminance * kInputMaxLuminance); + constexpr float kToneMapB = 1.f / kOutputMaxLuminance; + return kInputMaxLuminance * (1.f + kToneMapA * L) / (1.f + kToneMapB * L); + } + if (tf == ULTRAHDR_TF_HLG) { + // Let Lw be the brightness of the display in nits. 
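A numeric check of the PQ branch of compute_tone_map_gain above (a standalone sketch mirroring the constants shown): the gain approaches 10000/203 for very dark input and is exactly 1 at peak input, so peak PQ white tone-maps to diffuse white.

    #include <cmath>

    inline float pqToneMapGainSketch(float L) {
      constexpr float kInMax = 10000.f / 203.f;  // ≈ 49.26
      constexpr float kOutMax = 1.0f;
      L *= kInMax;
      constexpr float A = kOutMax / (kInMax * kInMax);
      constexpr float B = 1.f / kOutMax;
      return kInMax * (1.f + A * L) / (1.f + B * L);
    }

    // pqToneMapGainSketch(1e-6f) ≈ 49.26 and pqToneMapGainSketch(1.0f) = 1.0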
+ constexpr float Lw = 203.f; + const float gamma = 1.2f + 0.42f * std::log(Lw / 1000.f) / std::log(10.f); + return std::pow(L, gamma - 1.f); + } + return 1.f; +} + +std::shared_ptr IccHelper::write_cicp_tag(uint32_t color_primaries, + uint32_t transfer_characteristics) { + int total_length = 12; // 4 + 4 + 1 + 1 + 1 + 1 + std::shared_ptr dataStruct = std::make_shared(total_length); + dataStruct->write32(Endian_SwapBE32(kTAG_cicp)); // Type signature + dataStruct->write32(0); // Reserved + dataStruct->write8(color_primaries); // Color primaries + dataStruct->write8(transfer_characteristics); // Transfer characteristics + dataStruct->write8(0); // RGB matrix + dataStruct->write8(1); // Full range + return dataStruct; +} + +void IccHelper::compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]) { + // Compute the matrices to convert from source to Rec2020, and from Rec2020 to XYZD50. + Matrix3x3 src_to_rec2020; + const Matrix3x3 rec2020_to_XYZD50 = kRec2020; + { + Matrix3x3 XYZD50_to_rec2020; + Matrix3x3_invert(&rec2020_to_XYZD50, &XYZD50_to_rec2020); + src_to_rec2020 = Matrix3x3_concat(&XYZD50_to_rec2020, &src_to_XYZD50); + } + + // Convert the source signal to linear. + for (size_t i = 0; i < kNumChannels; ++i) { + rgb[i] = pqOetf(rgb[i]); + } + + // Convert source gamut to Rec2020. + Matrix3x3_apply(&src_to_rec2020, rgb); + + // Compute the luminance of the signal. + float L = bt2100Luminance({{{rgb[0], rgb[1], rgb[2]}}}); + + // Compute the tone map gain based on the luminance. + float tone_map_gain = compute_tone_map_gain(ULTRAHDR_TF_PQ, L); + + // Apply the tone map gain. + for (size_t i = 0; i < kNumChannels; ++i) { + rgb[i] *= tone_map_gain; + } + + // Convert from Rec2020-linear to XYZD50. + Matrix3x3_apply(&rec2020_to_XYZD50, rgb); +} + +std::shared_ptr IccHelper::write_clut(const uint8_t* grid_points, + const uint8_t* grid_16) { + uint32_t value_count = kNumChannels; + for (uint32_t i = 0; i < kNumChannels; ++i) { + value_count *= grid_points[i]; + } + + int total_length = 20 + 2 * value_count; + total_length = (((total_length + 2) >> 2) << 2); // 4 aligned + std::shared_ptr dataStruct = std::make_shared(total_length); + + for (size_t i = 0; i < 16; ++i) { + dataStruct->write8(i < kNumChannels ? grid_points[i] : 0); // Grid size + } + dataStruct->write8(2); // Grid byte width (always 16-bit) + dataStruct->write8(0); // Reserved + dataStruct->write8(0); // Reserved + dataStruct->write8(0); // Reserved + + for (uint32_t i = 0; i < value_count; ++i) { + uint16_t value = reinterpret_cast(grid_16)[i]; + dataStruct->write16(value); + } + + return dataStruct; +} + +std::shared_ptr IccHelper::write_mAB_or_mBA_tag(uint32_t type, bool has_a_curves, + const uint8_t* grid_points, + const uint8_t* grid_16) { + const size_t b_curves_offset = 32; + std::shared_ptr b_curves_data[kNumChannels]; + std::shared_ptr a_curves_data[kNumChannels]; + size_t clut_offset = 0; + std::shared_ptr clut; + size_t a_curves_offset = 0; + + // The "B" curve is required. + for (size_t i = 0; i < kNumChannels; ++i) { + b_curves_data[i] = write_trc_tag(kLinear_TransFun); + } + + // The "A" curve and CLUT are optional. 
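For the CLUT emitted by write_clut above, the tag body size follows directly from the grid dimensions: value_count = kNumChannels * g^3 for a g-point grid per channel, plus the 20-byte preamble, padded with the same (((x + 2) >> 2) << 2) trick used throughout this file. A minimal sketch (the 9-point grid is illustrative; the library's kGridSize is declared in icc.h, not in this hunk):

    #include <cstddef>

    constexpr size_t clutTagSizeSketch(size_t g) {
      size_t value_count = 3 * g * g * g;   // kNumChannels * grid_points^3
      size_t total = 20 + 2 * value_count;  // preamble + 16-bit entries
      return ((total + 2) >> 2) << 2;       // 4-byte padding as above
    }
    static_assert(clutTagSizeSketch(9) == 4396, "2187 values -> 4396-byte tag body");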
+ if (has_a_curves) { + clut_offset = b_curves_offset; + for (size_t i = 0; i < kNumChannels; ++i) { + clut_offset += b_curves_data[i]->getLength(); + } + clut = write_clut(grid_points, grid_16); + + a_curves_offset = clut_offset + clut->getLength(); + for (size_t i = 0; i < kNumChannels; ++i) { + a_curves_data[i] = write_trc_tag(kLinear_TransFun); + } + } + + int total_length = b_curves_offset; + for (size_t i = 0; i < kNumChannels; ++i) { + total_length += b_curves_data[i]->getLength(); + } + if (has_a_curves) { + total_length += clut->getLength(); + for (size_t i = 0; i < kNumChannels; ++i) { + total_length += a_curves_data[i]->getLength(); + } + } + std::shared_ptr dataStruct = std::make_shared(total_length); + dataStruct->write32(Endian_SwapBE32(type)); // Type signature + dataStruct->write32(0); // Reserved + dataStruct->write8(kNumChannels); // Input channels + dataStruct->write8(kNumChannels); // Output channels + dataStruct->write16(0); // Reserved + dataStruct->write32(Endian_SwapBE32(b_curves_offset)); // B curve offset + dataStruct->write32(Endian_SwapBE32(0)); // Matrix offset (ignored) + dataStruct->write32(Endian_SwapBE32(0)); // M curve offset (ignored) + dataStruct->write32(Endian_SwapBE32(clut_offset)); // CLUT offset + dataStruct->write32(Endian_SwapBE32(a_curves_offset)); // A curve offset + for (size_t i = 0; i < kNumChannels; ++i) { + if (dataStruct->write(b_curves_data[i]->getData(), b_curves_data[i]->getLength())) { + return dataStruct; + } + } + if (has_a_curves) { + dataStruct->write(clut->getData(), clut->getLength()); + for (size_t i = 0; i < kNumChannels; ++i) { + dataStruct->write(a_curves_data[i]->getData(), a_curves_data[i]->getLength()); + } + } + return dataStruct; +} + +std::shared_ptr IccHelper::writeIccProfile(ultrahdr_transfer_function tf, + ultrahdr_color_gamut gamut) { + ICCHeader header; + + std::vector>> tags; + + // Compute profile description tag + std::string desc = get_desc_string(tf, gamut); + + tags.emplace_back(kTAG_desc, write_text_tag(desc.c_str())); + + Matrix3x3 toXYZD50; + switch (gamut) { + case ULTRAHDR_COLORGAMUT_BT709: + toXYZD50 = kSRGB; + break; + case ULTRAHDR_COLORGAMUT_P3: + toXYZD50 = kDisplayP3; + break; + case ULTRAHDR_COLORGAMUT_BT2100: + toXYZD50 = kRec2020; + break; + default: + // Should not fall here. + return nullptr; + } + + // Compute primaries. + { + tags.emplace_back(kTAG_rXYZ, + write_xyz_tag(toXYZD50.vals[0][0], toXYZD50.vals[1][0], toXYZD50.vals[2][0])); + tags.emplace_back(kTAG_gXYZ, + write_xyz_tag(toXYZD50.vals[0][1], toXYZD50.vals[1][1], toXYZD50.vals[2][1])); + tags.emplace_back(kTAG_bXYZ, + write_xyz_tag(toXYZD50.vals[0][2], toXYZD50.vals[1][2], toXYZD50.vals[2][2])); + } + + // Compute white point tag (must be D50) + tags.emplace_back(kTAG_wtpt, write_xyz_tag(kD50_x, kD50_y, kD50_z)); + + // Compute transfer curves. 
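The colorant and white-point tags above store their XYZ coordinates through float_round_to_fixed, which is declared in icc.h (not in this hunk) and is assumed here to produce the standard ICC s15Fixed16 encoding; a minimal sketch of that encoding:

    #include <cmath>
    #include <cstdint>

    inline int32_t s15Fixed16Sketch(float x) {
      return static_cast<int32_t>(std::lround(x * 65536.0));  // 16.16 fixed point
    }

    // e.g. a D50 white-point X of 0.9642 encodes as 63190 (0x0000F6D6)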
+ if (tf != ULTRAHDR_TF_PQ) { + if (tf == ULTRAHDR_TF_HLG) { + std::vector trc_table; + trc_table.resize(kTrcTableSize * 2); + for (uint32_t i = 0; i < kTrcTableSize; ++i) { + float x = i / (kTrcTableSize - 1.f); + float y = hlgOetf(x); + y *= compute_tone_map_gain(tf, y); + float_to_table16(y, &trc_table[2 * i]); + } + + tags.emplace_back(kTAG_rTRC, + write_trc_tag(kTrcTableSize, reinterpret_cast(trc_table.data()))); + tags.emplace_back(kTAG_gTRC, + write_trc_tag(kTrcTableSize, reinterpret_cast(trc_table.data()))); + tags.emplace_back(kTAG_bTRC, + write_trc_tag(kTrcTableSize, reinterpret_cast(trc_table.data()))); + } else { + tags.emplace_back(kTAG_rTRC, write_trc_tag(kSRGB_TransFun)); + tags.emplace_back(kTAG_gTRC, write_trc_tag(kSRGB_TransFun)); + tags.emplace_back(kTAG_bTRC, write_trc_tag(kSRGB_TransFun)); + } + } + + // Compute CICP. + if (tf == ULTRAHDR_TF_HLG || tf == ULTRAHDR_TF_PQ) { + // The CICP tag is present in ICC 4.4, so update the header's version. + header.version = Endian_SwapBE32(0x04400000); + + uint32_t color_primaries = 0; + if (gamut == ULTRAHDR_COLORGAMUT_BT709) { + color_primaries = kCICPPrimariesSRGB; + } else if (gamut == ULTRAHDR_COLORGAMUT_P3) { + color_primaries = kCICPPrimariesP3; + } + + uint32_t transfer_characteristics = 0; + if (tf == ULTRAHDR_TF_SRGB) { + transfer_characteristics = kCICPTrfnSRGB; + } else if (tf == ULTRAHDR_TF_LINEAR) { + transfer_characteristics = kCICPTrfnLinear; + } else if (tf == ULTRAHDR_TF_PQ) { + transfer_characteristics = kCICPTrfnPQ; + } else if (tf == ULTRAHDR_TF_HLG) { + transfer_characteristics = kCICPTrfnHLG; + } + tags.emplace_back(kTAG_cicp, write_cicp_tag(color_primaries, transfer_characteristics)); + } + + // Compute A2B0. + if (tf == ULTRAHDR_TF_PQ) { + std::vector a2b_grid; + a2b_grid.resize(kGridSize * kGridSize * kGridSize * kNumChannels * 2); + size_t a2b_grid_index = 0; + for (uint32_t r_index = 0; r_index < kGridSize; ++r_index) { + for (uint32_t g_index = 0; g_index < kGridSize; ++g_index) { + for (uint32_t b_index = 0; b_index < kGridSize; ++b_index) { + float rgb[3] = { + r_index / (kGridSize - 1.f), + g_index / (kGridSize - 1.f), + b_index / (kGridSize - 1.f), + }; + compute_lut_entry(toXYZD50, rgb); + float_XYZD50_to_grid16_lab(rgb, &a2b_grid[a2b_grid_index]); + a2b_grid_index += 6; + } + } + } + const uint8_t* grid_16 = reinterpret_cast(a2b_grid.data()); + + uint8_t grid_points[kNumChannels]; + for (size_t i = 0; i < kNumChannels; ++i) { + grid_points[i] = kGridSize; + } + + auto a2b_data = write_mAB_or_mBA_tag(kTAG_mABType, + /* has_a_curves */ true, grid_points, grid_16); + tags.emplace_back(kTAG_A2B0, std::move(a2b_data)); + } + + // Compute B2A0. + if (tf == ULTRAHDR_TF_PQ) { + auto b2a_data = write_mAB_or_mBA_tag(kTAG_mBAType, + /* has_a_curves */ false, + /* grid_points */ nullptr, + /* grid_16 */ nullptr); + tags.emplace_back(kTAG_B2A0, std::move(b2a_data)); + } + + // Compute copyright tag + tags.emplace_back(kTAG_cprt, write_text_tag("Google Inc. 2022")); + + // Compute the size of the profile. 
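On the header.version value above: ICC packs the profile version into the top two bytes of this field, with the major version in the first byte and the minor and bug-fix versions as two nibbles in the second, so 0x04400000 denotes version 4.4.0, the first revision that defines the 'cicp' tag. A minimal sketch of the packing:

    #include <cstdint>

    constexpr uint32_t iccVersionSketch(uint8_t major, uint8_t minor, uint8_t bugfix) {
      return (static_cast<uint32_t>(major) << 24) |
             (static_cast<uint32_t>((minor << 4) | (bugfix & 0xF)) << 16);
    }
    static_assert(iccVersionSketch(4, 4, 0) == 0x04400000, "ICC v4.4.0");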
+ size_t tag_data_size = 0; + for (const auto& tag : tags) { + tag_data_size += tag.second->getLength(); + } + size_t tag_table_size = kICCTagTableEntrySize * tags.size(); + size_t profile_size = kICCHeaderSize + tag_table_size + tag_data_size; + + std::shared_ptr dataStruct = + std::make_shared(profile_size + kICCIdentifierSize); + + // Write identifier, chunk count, and chunk ID + if (!dataStruct->write(kICCIdentifier, sizeof(kICCIdentifier)) || !dataStruct->write8(1) || + !dataStruct->write8(1)) { + ALOGE("writeIccProfile(): error in identifier"); + return dataStruct; + } + + // Write the header. + header.data_color_space = Endian_SwapBE32(Signature_RGB); + header.pcs = Endian_SwapBE32(tf == ULTRAHDR_TF_PQ ? Signature_Lab : Signature_XYZ); + header.size = Endian_SwapBE32(profile_size); + header.tag_count = Endian_SwapBE32(tags.size()); + + if (!dataStruct->write(&header, sizeof(header))) { + ALOGE("writeIccProfile(): error in header"); + return dataStruct; + } + + // Write the tag table. Track the offset and size of the previous tag to + // compute each tag's offset. An empty SkData indicates that the previous + // tag is to be reused. + uint32_t last_tag_offset = sizeof(header) + tag_table_size; + uint32_t last_tag_size = 0; + for (const auto& tag : tags) { + last_tag_offset = last_tag_offset + last_tag_size; + last_tag_size = tag.second->getLength(); + uint32_t tag_table_entry[3] = { + Endian_SwapBE32(tag.first), + Endian_SwapBE32(last_tag_offset), + Endian_SwapBE32(last_tag_size), + }; + if (!dataStruct->write(tag_table_entry, sizeof(tag_table_entry))) { + ALOGE("writeIccProfile(): error in writing tag table"); + return dataStruct; + } + } + + // Write the tags. + for (const auto& tag : tags) { + if (!dataStruct->write(tag.second->getData(), tag.second->getLength())) { + ALOGE("writeIccProfile(): error in writing tags"); + return dataStruct; + } + } + + return dataStruct; +} + +bool IccHelper::tagsEqualToMatrix(const Matrix3x3& matrix, const uint8_t* red_tag, + const uint8_t* green_tag, const uint8_t* blue_tag) { + std::shared_ptr red_tag_test = + write_xyz_tag(matrix.vals[0][0], matrix.vals[1][0], matrix.vals[2][0]); + std::shared_ptr green_tag_test = + write_xyz_tag(matrix.vals[0][1], matrix.vals[1][1], matrix.vals[2][1]); + std::shared_ptr blue_tag_test = + write_xyz_tag(matrix.vals[0][2], matrix.vals[1][2], matrix.vals[2][2]); + return memcmp(red_tag, red_tag_test->getData(), kColorantTagSize) == 0 && + memcmp(green_tag, green_tag_test->getData(), kColorantTagSize) == 0 && + memcmp(blue_tag, blue_tag_test->getData(), kColorantTagSize) == 0; +} + +ultrahdr_color_gamut IccHelper::readIccColorGamut(void* icc_data, size_t icc_size) { + // Each tag table entry consists of 3 fields of 4 bytes each. + static const size_t kTagTableEntrySize = 12; + + if (icc_data == nullptr || icc_size < sizeof(ICCHeader) + kICCIdentifierSize) { + return ULTRAHDR_COLORGAMUT_UNSPECIFIED; + } + + if (memcmp(icc_data, kICCIdentifier, sizeof(kICCIdentifier)) != 0) { + return ULTRAHDR_COLORGAMUT_UNSPECIFIED; + } + + uint8_t* icc_bytes = reinterpret_cast(icc_data) + kICCIdentifierSize; + + ICCHeader* header = reinterpret_cast(icc_bytes); + + // Use 0 to indicate not found, since offsets are always relative to start + // of ICC data and therefore a tag offset of zero would never be valid. 
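A small arithmetic sketch of the tag-table layout written above, assuming the standard 128-byte ICC header and the 12-byte entries that kICCHeaderSize and kICCTagTableEntrySize are expected to denote: each entry records a signature, an offset, and a size, and the first tag body begins immediately after the header plus the table.

    #include <cstdint>

    constexpr uint32_t firstTagOffsetSketch(uint32_t tagCount) {
      constexpr uint32_t kHeaderBytes = 128;  // standard ICC header size
      constexpr uint32_t kEntryBytes = 12;    // signature + offset + size
      return kHeaderBytes + kEntryBytes * tagCount;
    }
    static_assert(firstTagOffsetSketch(8) == 224, "8 tags -> first tag body at offset 224");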
+ size_t red_primary_offset = 0, green_primary_offset = 0, blue_primary_offset = 0; + size_t red_primary_size = 0, green_primary_size = 0, blue_primary_size = 0; + for (size_t tag_idx = 0; tag_idx < Endian_SwapBE32(header->tag_count); ++tag_idx) { + uint32_t* tag_entry_start = + reinterpret_cast(icc_bytes + sizeof(ICCHeader) + tag_idx * kTagTableEntrySize); + // first 4 bytes are the tag signature, next 4 bytes are the tag offset, + // last 4 bytes are the tag length in bytes. + if (red_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_rXYZ)) { + red_primary_offset = Endian_SwapBE32(*(tag_entry_start + 1)); + red_primary_size = Endian_SwapBE32(*(tag_entry_start + 2)); + } else if (green_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_gXYZ)) { + green_primary_offset = Endian_SwapBE32(*(tag_entry_start + 1)); + green_primary_size = Endian_SwapBE32(*(tag_entry_start + 2)); + } else if (blue_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_bXYZ)) { + blue_primary_offset = Endian_SwapBE32(*(tag_entry_start + 1)); + blue_primary_size = Endian_SwapBE32(*(tag_entry_start + 2)); + } + } + + if (red_primary_offset == 0 || red_primary_size != kColorantTagSize || + kICCIdentifierSize + red_primary_offset + red_primary_size > icc_size || + green_primary_offset == 0 || green_primary_size != kColorantTagSize || + kICCIdentifierSize + green_primary_offset + green_primary_size > icc_size || + blue_primary_offset == 0 || blue_primary_size != kColorantTagSize || + kICCIdentifierSize + blue_primary_offset + blue_primary_size > icc_size) { + return ULTRAHDR_COLORGAMUT_UNSPECIFIED; + } + + uint8_t* red_tag = icc_bytes + red_primary_offset; + uint8_t* green_tag = icc_bytes + green_primary_offset; + uint8_t* blue_tag = icc_bytes + blue_primary_offset; + + // Serialize tags as we do on encode and compare what we find to that to + // determine the gamut (since we don't have a need yet for full deserialize). + if (tagsEqualToMatrix(kSRGB, red_tag, green_tag, blue_tag)) { + return ULTRAHDR_COLORGAMUT_BT709; + } else if (tagsEqualToMatrix(kDisplayP3, red_tag, green_tag, blue_tag)) { + return ULTRAHDR_COLORGAMUT_P3; + } else if (tagsEqualToMatrix(kRec2020, red_tag, green_tag, blue_tag)) { + return ULTRAHDR_COLORGAMUT_BT2100; + } + + // Didn't find a match to one of the profiles we write; indicate the gamut + // is unspecified since we don't understand it. + return ULTRAHDR_COLORGAMUT_UNSPECIFIED; +} + +} // namespace ultrahdr diff --git a/lib/src/jpegdecoderhelper.cpp b/lib/src/jpegdecoderhelper.cpp new file mode 100644 index 0000000..8a8278d --- /dev/null +++ b/lib/src/jpegdecoderhelper.cpp @@ -0,0 +1,537 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include + +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/ultrahdr.h" +#include "ultrahdr/jpegdecoderhelper.h" + +using namespace std; + +namespace ultrahdr { + +const uint32_t kAPP0Marker = JPEG_APP0; // JFIF +const uint32_t kAPP1Marker = JPEG_APP0 + 1; // EXIF, XMP +const uint32_t kAPP2Marker = JPEG_APP0 + 2; // ICC + +constexpr uint32_t kICCMarkerHeaderSize = 14; +constexpr uint8_t kICCSig[] = { + 'I', 'C', 'C', '_', 'P', 'R', 'O', 'F', 'I', 'L', 'E', '\0', +}; +constexpr uint8_t kXmpNameSpace[] = { + 'h', 't', 't', 'p', ':', '/', '/', 'n', 's', '.', 'a', 'd', 'o', 'b', 'e', + '.', 'c', 'o', 'm', '/', 'x', 'a', 'p', '/', '1', '.', '0', '/', '\0', +}; +constexpr uint8_t kExifIdCode[] = { + 'E', 'x', 'i', 'f', '\0', '\0', +}; + +struct jpegr_source_mgr : jpeg_source_mgr { + jpegr_source_mgr(const uint8_t* ptr, int len); + ~jpegr_source_mgr(); + + const uint8_t* mBufferPtr; + size_t mBufferLength; +}; + +struct jpegrerror_mgr { + struct jpeg_error_mgr pub; + jmp_buf setjmp_buffer; +}; + +static void jpegr_init_source(j_decompress_ptr cinfo) { + jpegr_source_mgr* src = static_cast(cinfo->src); + src->next_input_byte = static_cast(src->mBufferPtr); + src->bytes_in_buffer = src->mBufferLength; +} + +static boolean jpegr_fill_input_buffer(j_decompress_ptr /* cinfo */) { + ALOGE("%s : should not get here", __func__); + return FALSE; +} + +static void jpegr_skip_input_data(j_decompress_ptr cinfo, long num_bytes) { + jpegr_source_mgr* src = static_cast(cinfo->src); + + if (num_bytes > static_cast(src->bytes_in_buffer)) { + ALOGE("jpegr_skip_input_data - num_bytes > (long)src->bytes_in_buffer"); + } else { + src->next_input_byte += num_bytes; + src->bytes_in_buffer -= num_bytes; + } +} + +static void jpegr_term_source(j_decompress_ptr /*cinfo*/) {} + +jpegr_source_mgr::jpegr_source_mgr(const uint8_t* ptr, int len) + : mBufferPtr(ptr), mBufferLength(len) { + init_source = jpegr_init_source; + fill_input_buffer = jpegr_fill_input_buffer; + skip_input_data = jpegr_skip_input_data; + resync_to_restart = jpeg_resync_to_restart; + term_source = jpegr_term_source; +} + +jpegr_source_mgr::~jpegr_source_mgr() {} + +static void jpegrerror_exit(j_common_ptr cinfo) { + jpegrerror_mgr* err = reinterpret_cast(cinfo->err); + longjmp(err->setjmp_buffer, 1); +} + +static void output_message(j_common_ptr cinfo) { + char buffer[JMSG_LENGTH_MAX]; + + /* Create the message */ + (*cinfo->err->format_message)(cinfo, buffer); + ALOGE("%s\n", buffer); +} + +JpegDecoderHelper::JpegDecoderHelper() {} + +JpegDecoderHelper::~JpegDecoderHelper() {} + +bool JpegDecoderHelper::decompressImage(const void* image, int length, bool decodeToRGBA) { + if (image == nullptr || length <= 0) { + ALOGE("Image size can not be handled: %d", length); + return false; + } + mResultBuffer.clear(); + mXMPBuffer.clear(); + return decode(image, length, decodeToRGBA); +} + +void* JpegDecoderHelper::getDecompressedImagePtr() { return mResultBuffer.data(); } + +size_t JpegDecoderHelper::getDecompressedImageSize() { return mResultBuffer.size(); } + +void* JpegDecoderHelper::getXMPPtr() { return mXMPBuffer.data(); } + +size_t JpegDecoderHelper::getXMPSize() { return mXMPBuffer.size(); } + +void* JpegDecoderHelper::getEXIFPtr() { return mEXIFBuffer.data(); } + +size_t JpegDecoderHelper::getEXIFSize() { return mEXIFBuffer.size(); } + +void* JpegDecoderHelper::getICCPtr() { return mICCBuffer.data(); } + +size_t JpegDecoderHelper::getICCSize() { return mICCBuffer.size(); } + +size_t 
JpegDecoderHelper::getDecompressedImageWidth() { return mWidth; } + +size_t JpegDecoderHelper::getDecompressedImageHeight() { return mHeight; } + +// Here we only handle the first EXIF package, and in theary EXIF (or JFIF) must be the first +// in the image file. +// We assume that all packages are starting with two bytes marker (eg FF E1 for EXIF package), +// two bytes of package length which is stored in marker->original_length, and the real data +// which is stored in marker->data. +bool JpegDecoderHelper::extractEXIF(const void* image, int length) { + jpeg_decompress_struct cinfo; + jpegr_source_mgr mgr(static_cast(image), length); + jpegrerror_mgr myerr; + + cinfo.err = jpeg_std_error(&myerr.pub); + myerr.pub.error_exit = jpegrerror_exit; + myerr.pub.output_message = output_message; + + if (setjmp(myerr.setjmp_buffer)) { + jpeg_destroy_decompress(&cinfo); + return false; + } + jpeg_create_decompress(&cinfo); + + jpeg_save_markers(&cinfo, kAPP0Marker, 0xFFFF); + jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF); + + cinfo.src = &mgr; + jpeg_read_header(&cinfo, TRUE); + + size_t pos = 2; // position after SOI + for (jpeg_marker_struct* marker = cinfo.marker_list; marker; marker = marker->next) { + pos += 4; + pos += marker->original_length; + + if (marker->marker != kAPP1Marker) { + continue; + } + + const unsigned int len = marker->data_length; + + if (len > sizeof(kExifIdCode) && !memcmp(marker->data, kExifIdCode, sizeof(kExifIdCode))) { + mEXIFBuffer.resize(len, 0); + memcpy(static_cast(mEXIFBuffer.data()), marker->data, len); + mExifPos = pos - marker->original_length; + break; + } + } + + jpeg_destroy_decompress(&cinfo); + return true; +} + +bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA) { + bool status = true; + jpeg_decompress_struct cinfo; + jpegrerror_mgr myerr; + cinfo.err = jpeg_std_error(&myerr.pub); + myerr.pub.error_exit = jpegrerror_exit; + myerr.pub.output_message = output_message; + + if (setjmp(myerr.setjmp_buffer)) { + jpeg_destroy_decompress(&cinfo); + return false; + } + + jpeg_create_decompress(&cinfo); + + jpeg_save_markers(&cinfo, kAPP0Marker, 0xFFFF); + jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF); + jpeg_save_markers(&cinfo, kAPP2Marker, 0xFFFF); + + jpegr_source_mgr mgr(static_cast(image), length); + cinfo.src = &mgr; + if (jpeg_read_header(&cinfo, TRUE) != JPEG_HEADER_OK) { + jpeg_destroy_decompress(&cinfo); + return false; + } + + // Save XMP data, EXIF data, and ICC data. + // Here we only handle the first XMP / EXIF / ICC package. + // We assume that all packages are starting with two bytes marker (eg FF E1 for EXIF package), + // two bytes of package length which is stored in marker->original_length, and the real data + // which is stored in marker->data. 
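A worked example of the position bookkeeping described above, with illustrative numbers: suppose the file is SOI followed immediately by an EXIF APP1 segment whose original_length is 100 bytes.

    pos starts at 2                  (just past the 2-byte SOI marker)
    pos += 4   ->   6                (2-byte APP1 marker + 2-byte length field)
    pos += 100 -> 106                (the EXIF payload itself)
    mExifPos = 106 - 100 = 6         (file offset where the EXIF payload begins)

So mExifPos always points at the first byte of the payload, just past the marker and length field of the segment that contained it.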
+ bool exifAppears = false; + bool xmpAppears = false; + bool iccAppears = false; + size_t pos = 2; // position after SOI + for (jpeg_marker_struct* marker = cinfo.marker_list; + marker && !(exifAppears && xmpAppears && iccAppears); marker = marker->next) { + pos += 4; + pos += marker->original_length; + if (marker->marker != kAPP1Marker && marker->marker != kAPP2Marker) { + continue; + } + const unsigned int len = marker->data_length; + if (!xmpAppears && len > sizeof(kXmpNameSpace) && + !memcmp(marker->data, kXmpNameSpace, sizeof(kXmpNameSpace))) { + mXMPBuffer.resize(len + 1, 0); + memcpy(static_cast(mXMPBuffer.data()), marker->data, len); + xmpAppears = true; + } else if (!exifAppears && len > sizeof(kExifIdCode) && + !memcmp(marker->data, kExifIdCode, sizeof(kExifIdCode))) { + mEXIFBuffer.resize(len, 0); + memcpy(static_cast(mEXIFBuffer.data()), marker->data, len); + exifAppears = true; + mExifPos = pos - marker->original_length; + } else if (!iccAppears && len > sizeof(kICCSig) && + !memcmp(marker->data, kICCSig, sizeof(kICCSig))) { + mICCBuffer.resize(len, 0); + memcpy(static_cast(mICCBuffer.data()), marker->data, len); + iccAppears = true; + } + } + + mWidth = cinfo.image_width; + mHeight = cinfo.image_height; + if (mWidth > kMaxWidth || mHeight > kMaxHeight) { + status = false; + goto CleanUp; + } + + if (decodeToRGBA) { + // The primary image is expected to be yuv420 sampling + if (cinfo.jpeg_color_space != JCS_YCbCr) { + status = false; + ALOGE("%s: decodeToRGBA unexpected jpeg color space ", __func__); + goto CleanUp; + } + if (cinfo.comp_info[0].h_samp_factor != 2 || cinfo.comp_info[0].v_samp_factor != 2 || + cinfo.comp_info[1].h_samp_factor != 1 || cinfo.comp_info[1].v_samp_factor != 1 || + cinfo.comp_info[2].h_samp_factor != 1 || cinfo.comp_info[2].v_samp_factor != 1) { + status = false; + ALOGE("%s: decodeToRGBA unexpected primary image sub-sampling", __func__); + goto CleanUp; + } + // 4 bytes per pixel + mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 4); + cinfo.out_color_space = JCS_EXT_RGBA; + } else { + if (cinfo.jpeg_color_space == JCS_YCbCr) { + if (cinfo.comp_info[0].h_samp_factor != 2 || cinfo.comp_info[0].v_samp_factor != 2 || + cinfo.comp_info[1].h_samp_factor != 1 || cinfo.comp_info[1].v_samp_factor != 1 || + cinfo.comp_info[2].h_samp_factor != 1 || cinfo.comp_info[2].v_samp_factor != 1) { + status = false; + ALOGE("%s: decoding to YUV only supports 4:2:0 subsampling", __func__); + goto CleanUp; + } + mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 3 / 2, 0); + } else if (cinfo.jpeg_color_space == JCS_GRAYSCALE) { + mResultBuffer.resize(cinfo.image_width * cinfo.image_height, 0); + } else { + status = false; + ALOGE("%s: decodeToYUV unexpected jpeg color space", __func__); + goto CleanUp; + } + cinfo.out_color_space = cinfo.jpeg_color_space; + cinfo.raw_data_out = TRUE; + } + + cinfo.dct_method = JDCT_ISLOW; + jpeg_start_decompress(&cinfo); + if (!decompress(&cinfo, static_cast(mResultBuffer.data()), + cinfo.jpeg_color_space == JCS_GRAYSCALE)) { + status = false; + goto CleanUp; + } + +CleanUp: + jpeg_finish_decompress(&cinfo); + jpeg_destroy_decompress(&cinfo); + + return status; +} + +bool JpegDecoderHelper::decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest, + bool isSingleChannel) { + return isSingleChannel ? decompressSingleChannel(cinfo, dest) + : ((cinfo->out_color_space == JCS_EXT_RGBA) ? 
decompressRGBA(cinfo, dest) + : decompressYUV(cinfo, dest)); +} + +bool JpegDecoderHelper::getCompressedImageParameters(const void* image, int length, size_t* pWidth, + size_t* pHeight, std::vector* iccData, + std::vector* exifData) { + jpeg_decompress_struct cinfo; + jpegrerror_mgr myerr; + cinfo.err = jpeg_std_error(&myerr.pub); + myerr.pub.error_exit = jpegrerror_exit; + myerr.pub.output_message = output_message; + + if (setjmp(myerr.setjmp_buffer)) { + jpeg_destroy_decompress(&cinfo); + return false; + } + jpeg_create_decompress(&cinfo); + + jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF); + jpeg_save_markers(&cinfo, kAPP2Marker, 0xFFFF); + + jpegr_source_mgr mgr(static_cast(image), length); + cinfo.src = &mgr; + if (jpeg_read_header(&cinfo, TRUE) != JPEG_HEADER_OK) { + jpeg_destroy_decompress(&cinfo); + return false; + } + + if (pWidth != nullptr) { + *pWidth = cinfo.image_width; + } + if (pHeight != nullptr) { + *pHeight = cinfo.image_height; + } + + if (iccData != nullptr) { + for (jpeg_marker_struct* marker = cinfo.marker_list; marker; marker = marker->next) { + if (marker->marker != kAPP2Marker) { + continue; + } + if (marker->data_length <= kICCMarkerHeaderSize || + memcmp(marker->data, kICCSig, sizeof(kICCSig)) != 0) { + continue; + } + + iccData->insert(iccData->end(), marker->data, marker->data + marker->data_length); + } + } + + if (exifData != nullptr) { + bool exifAppears = false; + for (jpeg_marker_struct* marker = cinfo.marker_list; marker && !exifAppears; + marker = marker->next) { + if (marker->marker != kAPP1Marker) { + continue; + } + + const unsigned int len = marker->data_length; + if (len >= sizeof(kExifIdCode) && !memcmp(marker->data, kExifIdCode, sizeof(kExifIdCode))) { + exifData->resize(len, 0); + memcpy(static_cast(exifData->data()), marker->data, len); + exifAppears = true; + } + } + } + + jpeg_destroy_decompress(&cinfo); + return true; +} + +bool JpegDecoderHelper::decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest) { + JSAMPLE* out = (JSAMPLE*)dest; + + while (cinfo->output_scanline < cinfo->image_height) { + if (1 != jpeg_read_scanlines(cinfo, &out, 1)) return false; + out += cinfo->image_width * 4; + } + return true; +} + +bool JpegDecoderHelper::decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest) { + size_t luma_plane_size = cinfo->image_width * cinfo->image_height; + size_t chroma_plane_size = luma_plane_size / 4; + uint8_t* y_plane = const_cast(dest); + uint8_t* u_plane = const_cast(dest + luma_plane_size); + uint8_t* v_plane = const_cast(dest + luma_plane_size + chroma_plane_size); + + const size_t aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize); + const bool is_width_aligned = (aligned_width == cinfo->image_width); + uint8_t* y_plane_intrm = nullptr; + uint8_t* u_plane_intrm = nullptr; + uint8_t* v_plane_intrm = nullptr; + + JSAMPROW y[kCompressBatchSize]; + JSAMPROW cb[kCompressBatchSize / 2]; + JSAMPROW cr[kCompressBatchSize / 2]; + JSAMPARRAY planes[3]{y, cb, cr}; + JSAMPROW y_intrm[kCompressBatchSize]; + JSAMPROW cb_intrm[kCompressBatchSize / 2]; + JSAMPROW cr_intrm[kCompressBatchSize / 2]; + JSAMPARRAY planes_intrm[3]{y_intrm, cb_intrm, cr_intrm}; + + if (cinfo->image_height % kCompressBatchSize != 0) { + mEmpty = std::make_unique(aligned_width); + } + + if (!is_width_aligned) { + size_t mcu_row_size = aligned_width * kCompressBatchSize * 3 / 2; + mBufferIntermediate = std::make_unique(mcu_row_size); + y_plane_intrm = mBufferIntermediate.get(); + u_plane_intrm = y_plane_intrm + (aligned_width * 
kCompressBatchSize); + v_plane_intrm = u_plane_intrm + (aligned_width * kCompressBatchSize) / 4; + for (int i = 0; i < kCompressBatchSize; ++i) { + y_intrm[i] = y_plane_intrm + i * aligned_width; + } + for (int i = 0; i < kCompressBatchSize / 2; ++i) { + int offset_intrm = i * (aligned_width / 2); + cb_intrm[i] = u_plane_intrm + offset_intrm; + cr_intrm[i] = v_plane_intrm + offset_intrm; + } + } + + while (cinfo->output_scanline < cinfo->image_height) { + size_t scanline_copy = cinfo->output_scanline; + for (int i = 0; i < kCompressBatchSize; ++i) { + size_t scanline = cinfo->output_scanline + i; + if (scanline < cinfo->image_height) { + y[i] = y_plane + scanline * cinfo->image_width; + } else { + y[i] = mEmpty.get(); + } + } + // cb, cr only have half scanlines + for (int i = 0; i < kCompressBatchSize / 2; ++i) { + size_t scanline = cinfo->output_scanline / 2 + i; + if (scanline < cinfo->image_height / 2) { + int offset = scanline * (cinfo->image_width / 2); + cb[i] = u_plane + offset; + cr[i] = v_plane + offset; + } else { + cb[i] = cr[i] = mEmpty.get(); + } + } + + int processed = + jpeg_read_raw_data(cinfo, is_width_aligned ? planes : planes_intrm, kCompressBatchSize); + if (processed != kCompressBatchSize) { + ALOGE("Number of processed lines does not equal input lines."); + return false; + } + if (!is_width_aligned) { + for (int i = 0; i < kCompressBatchSize; ++i) { + if (scanline_copy + i < cinfo->image_height) { + memcpy(y[i], y_intrm[i], cinfo->image_width); + } + } + for (int i = 0; i < kCompressBatchSize / 2; ++i) { + if (((scanline_copy / 2) + i) < (cinfo->image_height / 2)) { + memcpy(cb[i], cb_intrm[i], cinfo->image_width / 2); + memcpy(cr[i], cr_intrm[i], cinfo->image_width / 2); + } + } + } + } + return true; +} + +bool JpegDecoderHelper::decompressSingleChannel(jpeg_decompress_struct* cinfo, + const uint8_t* dest) { + uint8_t* y_plane = const_cast(dest); + uint8_t* y_plane_intrm = nullptr; + + const size_t aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize); + const bool is_width_aligned = (aligned_width == cinfo->image_width); + + JSAMPROW y[kCompressBatchSize]; + JSAMPARRAY planes[1]{y}; + JSAMPROW y_intrm[kCompressBatchSize]; + JSAMPARRAY planes_intrm[1]{y_intrm}; + + if (cinfo->image_height % kCompressBatchSize != 0) { + mEmpty = std::make_unique(aligned_width); + } + + if (!is_width_aligned) { + size_t mcu_row_size = aligned_width * kCompressBatchSize; + mBufferIntermediate = std::make_unique(mcu_row_size); + y_plane_intrm = mBufferIntermediate.get(); + for (int i = 0; i < kCompressBatchSize; ++i) { + y_intrm[i] = y_plane_intrm + i * aligned_width; + } + } + + while (cinfo->output_scanline < cinfo->image_height) { + size_t scanline_copy = cinfo->output_scanline; + for (int i = 0; i < kCompressBatchSize; ++i) { + size_t scanline = cinfo->output_scanline + i; + if (scanline < cinfo->image_height) { + y[i] = y_plane + scanline * cinfo->image_width; + } else { + y[i] = mEmpty.get(); + } + } + + int processed = + jpeg_read_raw_data(cinfo, is_width_aligned ? 
planes : planes_intrm, kCompressBatchSize); + if (processed != kCompressBatchSize / 2) { + ALOGE("Number of processed lines does not equal input lines."); + return false; + } + if (!is_width_aligned) { + for (int i = 0; i < kCompressBatchSize; ++i) { + if (scanline_copy + i < cinfo->image_height) { + memcpy(y[i], y_intrm[i], cinfo->image_width); + } + } + } + } + return true; +} + +} // namespace ultrahdr diff --git a/lib/src/jpegencoderhelper.cpp b/lib/src/jpegencoderhelper.cpp new file mode 100644 index 0000000..7bab01f --- /dev/null +++ b/lib/src/jpegencoderhelper.cpp @@ -0,0 +1,287 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/ultrahdr.h" +#include "ultrahdr/jpegencoderhelper.h" + +namespace ultrahdr { + +// The destination manager that can access |mResultBuffer| in JpegEncoderHelper. +struct destination_mgr { + struct jpeg_destination_mgr mgr; + JpegEncoderHelper* encoder; +}; + +JpegEncoderHelper::JpegEncoderHelper() {} + +JpegEncoderHelper::~JpegEncoderHelper() {} + +bool JpegEncoderHelper::compressImage(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, + int height, int lumaStride, int chromaStride, int quality, + const void* iccBuffer, unsigned int iccSize) { + mResultBuffer.clear(); + if (!encode(yBuffer, uvBuffer, width, height, lumaStride, chromaStride, quality, iccBuffer, + iccSize)) { + return false; + } + ALOGV("Compressed JPEG: %d[%dx%d] -> %zu bytes", (width * height * 12) / 8, width, height, + mResultBuffer.size()); + return true; +} + +void* JpegEncoderHelper::getCompressedImagePtr() { return mResultBuffer.data(); } + +size_t JpegEncoderHelper::getCompressedImageSize() { return mResultBuffer.size(); } + +void JpegEncoderHelper::initDestination(j_compress_ptr cinfo) { + destination_mgr* dest = reinterpret_cast(cinfo->dest); + std::vector& buffer = dest->encoder->mResultBuffer; + buffer.resize(kBlockSize); + dest->mgr.next_output_byte = &buffer[0]; + dest->mgr.free_in_buffer = buffer.size(); +} + +boolean JpegEncoderHelper::emptyOutputBuffer(j_compress_ptr cinfo) { + destination_mgr* dest = reinterpret_cast(cinfo->dest); + std::vector& buffer = dest->encoder->mResultBuffer; + size_t oldsize = buffer.size(); + buffer.resize(oldsize + kBlockSize); + dest->mgr.next_output_byte = &buffer[oldsize]; + dest->mgr.free_in_buffer = kBlockSize; + return true; +} + +void JpegEncoderHelper::terminateDestination(j_compress_ptr cinfo) { + destination_mgr* dest = reinterpret_cast(cinfo->dest); + std::vector& buffer = dest->encoder->mResultBuffer; + buffer.resize(buffer.size() - dest->mgr.free_in_buffer); +} + +void JpegEncoderHelper::outputErrorMessage(j_common_ptr cinfo) { + char buffer[JMSG_LENGTH_MAX]; + + /* Create the message */ + (*cinfo->err->format_message)(cinfo, buffer); + ALOGE("%s\n", buffer); +} + +bool JpegEncoderHelper::encode(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, + int height, 
int lumaStride, int chromaStride, int quality, + const void* iccBuffer, unsigned int iccSize) { + jpeg_compress_struct cinfo; + jpeg_error_mgr jerr; + + cinfo.err = jpeg_std_error(&jerr); + cinfo.err->output_message = &outputErrorMessage; + jpeg_create_compress(&cinfo); + setJpegDestination(&cinfo); + setJpegCompressStruct(width, height, quality, &cinfo, uvBuffer == nullptr); + jpeg_start_compress(&cinfo, TRUE); + if (iccBuffer != nullptr && iccSize > 0) { + jpeg_write_marker(&cinfo, JPEG_APP0 + 2, static_cast(iccBuffer), iccSize); + } + bool status = cinfo.num_components == 1 + ? compressY(&cinfo, yBuffer, lumaStride) + : compressYuv(&cinfo, yBuffer, uvBuffer, lumaStride, chromaStride); + jpeg_finish_compress(&cinfo); + jpeg_destroy_compress(&cinfo); + + return status; +} + +void JpegEncoderHelper::setJpegDestination(jpeg_compress_struct* cinfo) { + destination_mgr* dest = static_cast( + (*cinfo->mem->alloc_small)((j_common_ptr)cinfo, JPOOL_PERMANENT, sizeof(destination_mgr))); + dest->encoder = this; + dest->mgr.init_destination = &initDestination; + dest->mgr.empty_output_buffer = &emptyOutputBuffer; + dest->mgr.term_destination = &terminateDestination; + cinfo->dest = reinterpret_cast(dest); +} + +void JpegEncoderHelper::setJpegCompressStruct(int width, int height, int quality, + jpeg_compress_struct* cinfo, bool isSingleChannel) { + cinfo->image_width = width; + cinfo->image_height = height; + cinfo->input_components = isSingleChannel ? 1 : 3; + cinfo->in_color_space = isSingleChannel ? JCS_GRAYSCALE : JCS_YCbCr; + jpeg_set_defaults(cinfo); + jpeg_set_quality(cinfo, quality, TRUE); + cinfo->raw_data_in = TRUE; + cinfo->dct_method = JDCT_ISLOW; + cinfo->comp_info[0].h_samp_factor = cinfo->in_color_space == JCS_GRAYSCALE ? 1 : 2; + cinfo->comp_info[0].v_samp_factor = cinfo->in_color_space == JCS_GRAYSCALE ? 
1 : 2; + for (int i = 1; i < cinfo->num_components; i++) { + cinfo->comp_info[i].h_samp_factor = 1; + cinfo->comp_info[i].v_samp_factor = 1; + } +} + +bool JpegEncoderHelper::compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, + const uint8_t* uvBuffer, int lumaStride, int chromaStride) { + size_t chroma_plane_size = chromaStride * cinfo->image_height / 2; + uint8_t* y_plane = const_cast(yBuffer); + uint8_t* u_plane = const_cast(uvBuffer); + uint8_t* v_plane = const_cast(u_plane + chroma_plane_size); + + const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize); + const bool need_luma_padding = (lumaStride < aligned_width); + const int aligned_chroma_width = ALIGNM(cinfo->image_width / 2, kCompressBatchSize / 2); + const bool need_chroma_padding = (chromaStride < aligned_chroma_width); + + std::unique_ptr empty = nullptr; + std::unique_ptr y_mcu_row = nullptr; + std::unique_ptr cb_mcu_row = nullptr; + std::unique_ptr cr_mcu_row = nullptr; + uint8_t* y_mcu_row_ptr = nullptr; + uint8_t* cb_mcu_row_ptr = nullptr; + uint8_t* cr_mcu_row_ptr = nullptr; + + JSAMPROW y[kCompressBatchSize]; + JSAMPROW cb[kCompressBatchSize / 2]; + JSAMPROW cr[kCompressBatchSize / 2]; + JSAMPARRAY planes[3]{y, cb, cr}; + + if (cinfo->image_height % kCompressBatchSize != 0) { + empty = std::make_unique(aligned_width); + memset(empty.get(), 0, aligned_width); + } + + if (need_luma_padding) { + size_t mcu_row_size = aligned_width * kCompressBatchSize; + y_mcu_row = std::make_unique(mcu_row_size); + y_mcu_row_ptr = y_mcu_row.get(); + uint8_t* tmp = y_mcu_row_ptr; + for (int i = 0; i < kCompressBatchSize; ++i, tmp += aligned_width) { + memset(tmp + cinfo->image_width, 0, aligned_width - cinfo->image_width); + } + } + + if (need_chroma_padding) { + size_t mcu_row_size = aligned_chroma_width * kCompressBatchSize / 2; + cb_mcu_row = std::make_unique(mcu_row_size); + cb_mcu_row_ptr = cb_mcu_row.get(); + cr_mcu_row = std::make_unique(mcu_row_size); + cr_mcu_row_ptr = cr_mcu_row.get(); + uint8_t* tmp1 = cb_mcu_row_ptr; + uint8_t* tmp2 = cr_mcu_row_ptr; + for (int i = 0; i < kCompressBatchSize / 2; + ++i, tmp1 += aligned_chroma_width, tmp2 += aligned_chroma_width) { + memset(tmp1 + cinfo->image_width / 2, 0, aligned_chroma_width - (cinfo->image_width / 2)); + memset(tmp2 + cinfo->image_width / 2, 0, aligned_chroma_width - (cinfo->image_width / 2)); + } + } + + while (cinfo->next_scanline < cinfo->image_height) { + for (int i = 0; i < kCompressBatchSize; ++i) { + size_t scanline = cinfo->next_scanline + i; + if (scanline < cinfo->image_height) { + y[i] = y_plane + scanline * lumaStride; + if (need_luma_padding) { + uint8_t* tmp = y_mcu_row_ptr + i * aligned_width; + memcpy(tmp, y[i], cinfo->image_width); + y[i] = tmp; + } + } else { + y[i] = empty.get(); + } + } + // cb, cr only have half scanlines + for (int i = 0; i < kCompressBatchSize / 2; ++i) { + size_t scanline = cinfo->next_scanline / 2 + i; + if (scanline < cinfo->image_height / 2) { + int offset = scanline * chromaStride; + cb[i] = u_plane + offset; + cr[i] = v_plane + offset; + if (need_chroma_padding) { + uint8_t* tmp = cb_mcu_row_ptr + i * aligned_chroma_width; + memcpy(tmp, cb[i], cinfo->image_width / 2); + cb[i] = tmp; + tmp = cr_mcu_row_ptr + i * aligned_chroma_width; + memcpy(tmp, cr[i], cinfo->image_width / 2); + cr[i] = tmp; + } + } else { + cb[i] = cr[i] = empty.get(); + } + } + int processed = jpeg_write_raw_data(cinfo, planes, kCompressBatchSize); + if (processed != kCompressBatchSize) { + ALOGE("Number of processed lines does 
not equal input lines."); + return false; + } + } + return true; +} + +bool JpegEncoderHelper::compressY(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, + int lumaStride) { + uint8_t* y_plane = const_cast(yBuffer); + + const int aligned_luma_width = ALIGNM(cinfo->image_width, kCompressBatchSize); + const bool need_luma_padding = (lumaStride < aligned_luma_width); + + std::unique_ptr empty = nullptr; + std::unique_ptr y_mcu_row = nullptr; + uint8_t* y_mcu_row_ptr = nullptr; + + JSAMPROW y[kCompressBatchSize]; + JSAMPARRAY planes[1]{y}; + + if (cinfo->image_height % kCompressBatchSize != 0) { + empty = std::make_unique(aligned_luma_width); + memset(empty.get(), 0, aligned_luma_width); + } + + if (need_luma_padding) { + size_t mcu_row_size = aligned_luma_width * kCompressBatchSize; + y_mcu_row = std::make_unique(mcu_row_size); + y_mcu_row_ptr = y_mcu_row.get(); + uint8_t* tmp = y_mcu_row_ptr; + for (int i = 0; i < kCompressBatchSize; ++i, tmp += aligned_luma_width) { + memset(tmp + cinfo->image_width, 0, aligned_luma_width - cinfo->image_width); + } + } + + while (cinfo->next_scanline < cinfo->image_height) { + for (int i = 0; i < kCompressBatchSize; ++i) { + size_t scanline = cinfo->next_scanline + i; + if (scanline < cinfo->image_height) { + y[i] = y_plane + scanline * lumaStride; + if (need_luma_padding) { + uint8_t* tmp = y_mcu_row_ptr + i * aligned_luma_width; + memcpy(tmp, y[i], cinfo->image_width); + y[i] = tmp; + } + } else { + y[i] = empty.get(); + } + } + int processed = jpeg_write_raw_data(cinfo, planes, kCompressBatchSize); + if (processed != kCompressBatchSize / 2) { + ALOGE("Number of processed lines does not equal input lines."); + return false; + } + } + return true; +} + +} // namespace ultrahdr diff --git a/lib/src/jpegr.cpp b/lib/src/jpegr.cpp new file mode 100644 index 0000000..ed4fef1 --- /dev/null +++ b/lib/src/jpegr.cpp @@ -0,0 +1,1509 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifdef _WIN32 +#include +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/jpegr.h" +#include "ultrahdr/icc.h" +#include "ultrahdr/multipictureformat.h" + +#include "image_io/base/data_segment_data_source.h" +#include "image_io/jpeg/jpeg_info.h" +#include "image_io/jpeg/jpeg_info_builder.h" +#include "image_io/jpeg/jpeg_marker.h" +#include "image_io/jpeg/jpeg_scanner.h" + +using namespace std; +using namespace photos_editing_formats::image_io; + +namespace ultrahdr { + +#define USE_SRGB_INVOETF_LUT 1 +#define USE_HLG_OETF_LUT 1 +#define USE_PQ_OETF_LUT 1 +#define USE_HLG_INVOETF_LUT 1 +#define USE_PQ_INVOETF_LUT 1 +#define USE_APPLY_GAIN_LUT 1 + +#define JPEGR_CHECK(x) \ + { \ + status_t status = (x); \ + if ((status) != JPEGR_NO_ERROR) { \ + return status; \ + } \ + } + +// JPEG compress quality (0 ~ 100) for gain map +static const int kMapCompressQuality = 85; + +int GetCPUCoreCount() { + int cpuCoreCount = 1; + +#if defined(_WIN32) + SYSTEM_INFO system_info; + ZeroMemory(&system_info, sizeof(system_info)); + GetSystemInfo(&system_info); + cpuCoreCount = (size_t)system_info.dwNumberOfProcessors; +#elif defined(_SC_NPROCESSORS_ONLN) + cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN); +#elif defined(_SC_NPROCESSORS_CONF) + cpuCoreCount = sysconf(_SC_NPROCESSORS_CONF); +#else +#error platform-specific implementation for GetCPUCoreCount() missing. +#endif + if (cpuCoreCount <= 0) cpuCoreCount = 1; + return cpuCoreCount; +} + +/* + * MessageWriter implementation for ALOG functions. + */ +class AlogMessageWriter : public MessageWriter { + public: + void WriteMessage(const Message& message) override { + std::string log = GetFormattedMessage(message); + ALOGD("%s", log.c_str()); + } +}; + +/* + * Helper function copies the JPEG image from without EXIF. + * + * @param pDest destination of the data to be written. + * @param pSource source of data being written. + * @param exif_pos position of the EXIF package, which is aligned with jpegdecoder.getEXIFPos(). + * (4 bytes offset to FF sign, the byte after FF E1 XX XX ). + * @param exif_size exif size without the initial 4 bytes, aligned with jpegdecoder.getEXIFSize(). 
+ */ +static void copyJpegWithoutExif(jr_compressed_ptr pDest, jr_compressed_ptr pSource, size_t exif_pos, + size_t exif_size) { + const size_t exif_offset = 4; // exif_pos has 4 bytes offset to the FF sign + pDest->length = pSource->length - exif_size - exif_offset; + pDest->data = new uint8_t[pDest->length]; + pDest->maxLength = pDest->length; + pDest->colorGamut = pSource->colorGamut; + memcpy(pDest->data, pSource->data, exif_pos - exif_offset); + memcpy((uint8_t*)pDest->data + exif_pos - exif_offset, + (uint8_t*)pSource->data + exif_pos + exif_size, pSource->length - exif_pos - exif_size); +} + +status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, + jr_uncompressed_ptr yuv420_image_ptr, + ultrahdr_transfer_function hdr_tf, + jr_compressed_ptr dest_ptr) { + if (p010_image_ptr == nullptr || p010_image_ptr->data == nullptr) { + ALOGE("Received nullptr for input p010 image"); + return ERROR_JPEGR_BAD_PTR; + } + if (p010_image_ptr->width % 2 != 0 || p010_image_ptr->height % 2 != 0) { + ALOGE("Image dimensions cannot be odd, image dimensions %zux%zu", p010_image_ptr->width, + p010_image_ptr->height); + return ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT; + } + if (p010_image_ptr->width < kMinWidth || p010_image_ptr->height < kMinHeight) { + ALOGE("Image dimensions cannot be less than %dx%d, image dimensions %zux%zu", kMinWidth, + kMinHeight, p010_image_ptr->width, p010_image_ptr->height); + return ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT; + } + if (p010_image_ptr->width > kMaxWidth || p010_image_ptr->height > kMaxHeight) { + ALOGE("Image dimensions cannot be larger than %dx%d, image dimensions %zux%zu", kMaxWidth, + kMaxHeight, p010_image_ptr->width, p010_image_ptr->height); + return ERROR_JPEGR_UNSUPPORTED_WIDTH_HEIGHT; + } + if (p010_image_ptr->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED || + p010_image_ptr->colorGamut > ULTRAHDR_COLORGAMUT_MAX) { + ALOGE("Unrecognized p010 color gamut %d", p010_image_ptr->colorGamut); + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + if (p010_image_ptr->luma_stride != 0 && p010_image_ptr->luma_stride < p010_image_ptr->width) { + ALOGE("Luma stride must not be smaller than width, stride=%zu, width=%zu", + p010_image_ptr->luma_stride, p010_image_ptr->width); + return ERROR_JPEGR_INVALID_STRIDE; + } + if (p010_image_ptr->chroma_data != nullptr && + p010_image_ptr->chroma_stride < p010_image_ptr->width) { + ALOGE("Chroma stride must not be smaller than width, stride=%zu, width=%zu", + p010_image_ptr->chroma_stride, p010_image_ptr->width); + return ERROR_JPEGR_INVALID_STRIDE; + } + if (dest_ptr == nullptr || dest_ptr->data == nullptr) { + ALOGE("Received nullptr for destination"); + return ERROR_JPEGR_BAD_PTR; + } + if (hdr_tf <= ULTRAHDR_TF_UNSPECIFIED || hdr_tf > ULTRAHDR_TF_MAX || hdr_tf == ULTRAHDR_TF_SRGB) { + ALOGE("Invalid hdr transfer function %d", hdr_tf); + return ERROR_JPEGR_INVALID_TRANS_FUNC; + } + if (yuv420_image_ptr == nullptr) { + return JPEGR_NO_ERROR; + } + if (yuv420_image_ptr->data == nullptr) { + ALOGE("Received nullptr for uncompressed 420 image"); + return ERROR_JPEGR_BAD_PTR; + } + if (yuv420_image_ptr->luma_stride != 0 && + yuv420_image_ptr->luma_stride < yuv420_image_ptr->width) { + ALOGE("Luma stride must not be smaller than width, stride=%zu, width=%zu", + yuv420_image_ptr->luma_stride, yuv420_image_ptr->width); + return ERROR_JPEGR_INVALID_STRIDE; + } + if (yuv420_image_ptr->chroma_data != nullptr && + yuv420_image_ptr->chroma_stride < yuv420_image_ptr->width / 2) { + ALOGE("Chroma stride must not be smaller than (width / 
2), stride=%zu, width=%zu", + yuv420_image_ptr->chroma_stride, yuv420_image_ptr->width); + return ERROR_JPEGR_INVALID_STRIDE; + } + if (p010_image_ptr->width != yuv420_image_ptr->width || + p010_image_ptr->height != yuv420_image_ptr->height) { + ALOGE("Image resolutions mismatch: P010: %zux%zu, YUV420: %zux%zu", p010_image_ptr->width, + p010_image_ptr->height, yuv420_image_ptr->width, yuv420_image_ptr->height); + return ERROR_JPEGR_RESOLUTION_MISMATCH; + } + if (yuv420_image_ptr->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED || + yuv420_image_ptr->colorGamut > ULTRAHDR_COLORGAMUT_MAX) { + ALOGE("Unrecognized 420 color gamut %d", yuv420_image_ptr->colorGamut); + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + return JPEGR_NO_ERROR; +} + +status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr, + jr_uncompressed_ptr yuv420_image_ptr, + ultrahdr_transfer_function hdr_tf, + jr_compressed_ptr dest_ptr, int quality) { + if (quality < 0 || quality > 100) { + ALOGE("quality factor is out side range [0-100], quality factor : %d", quality); + return ERROR_JPEGR_INVALID_QUALITY_FACTOR; + } + return areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest_ptr); +} + +/* Encode API-0 */ +status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, ultrahdr_transfer_function hdr_tf, + jr_compressed_ptr dest, int quality, jr_exif_ptr exif) { + // validate input arguments + JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, nullptr, hdr_tf, dest, quality)); + if (exif != nullptr && exif->data == nullptr) { + ALOGE("received nullptr for exif metadata"); + return ERROR_JPEGR_BAD_PTR; + } + + // clean up input structure for later usage + jpegr_uncompressed_struct p010_image = *p010_image_ptr; + if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; + if (!p010_image.chroma_data) { + uint16_t* data = reinterpret_cast(p010_image.data); + p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; + p010_image.chroma_stride = p010_image.luma_stride; + } + + const size_t yu420_luma_stride = ALIGNM(p010_image.width, JpegEncoderHelper::kCompressBatchSize); + unique_ptr yuv420_image_data = + make_unique(yu420_luma_stride * p010_image.height * 3 / 2); + jpegr_uncompressed_struct yuv420_image; + yuv420_image.data = yuv420_image_data.get(); + yuv420_image.width = p010_image.width; + yuv420_image.height = p010_image.height; + yuv420_image.colorGamut = p010_image.colorGamut; + yuv420_image.chroma_data = nullptr; + yuv420_image.luma_stride = yu420_luma_stride; + yuv420_image.chroma_stride = yu420_luma_stride >> 1; + uint8_t* data = reinterpret_cast(yuv420_image.data); + yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height; + + // tone map + JPEGR_CHECK(toneMap(&p010_image, &yuv420_image)); + + // gain map + ultrahdr_metadata_struct metadata; + metadata.version = kJpegrVersion; + jpegr_uncompressed_struct gainmap_image; + JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image)); + std::unique_ptr map_data; + map_data.reset(reinterpret_cast(gainmap_image.data)); + + // compress gain map + JpegEncoderHelper jpeg_enc_obj_gm; + JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); + jpegr_compressed_struct compressed_map; + compressed_map.data = jpeg_enc_obj_gm.getCompressedImagePtr(); + compressed_map.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + compressed_map.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + compressed_map.colorGamut = 
ULTRAHDR_COLORGAMUT_UNSPECIFIED; + + std::shared_ptr icc = + IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420_image.colorGamut); + + // convert to Bt601 YUV encoding for JPEG encode + if (yuv420_image.colorGamut != ULTRAHDR_COLORGAMUT_P3) { + JPEGR_CHECK(convertYuv(&yuv420_image, yuv420_image.colorGamut, ULTRAHDR_COLORGAMUT_P3)); + } + + // compress 420 image + JpegEncoderHelper jpeg_enc_obj_yuv420; + if (!jpeg_enc_obj_yuv420.compressImage(reinterpret_cast(yuv420_image.data), + reinterpret_cast(yuv420_image.chroma_data), + yuv420_image.width, yuv420_image.height, + yuv420_image.luma_stride, yuv420_image.chroma_stride, + quality, icc->getData(), icc->getLength())) { + return ERROR_JPEGR_ENCODE_ERROR; + } + jpegr_compressed_struct jpeg; + jpeg.data = jpeg_enc_obj_yuv420.getCompressedImagePtr(); + jpeg.length = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); + jpeg.maxLength = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); + jpeg.colorGamut = yuv420_image.colorGamut; + + // append gain map, no ICC since JPEG encode already did it + JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0, + &metadata, dest)); + + return JPEGR_NO_ERROR; +} + +/* Encode API-1 */ +status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, + jr_uncompressed_ptr yuv420_image_ptr, ultrahdr_transfer_function hdr_tf, + jr_compressed_ptr dest, int quality, jr_exif_ptr exif) { + // validate input arguments + if (yuv420_image_ptr == nullptr) { + ALOGE("received nullptr for uncompressed 420 image"); + return ERROR_JPEGR_BAD_PTR; + } + if (exif != nullptr && exif->data == nullptr) { + ALOGE("received nullptr for exif metadata"); + return ERROR_JPEGR_BAD_PTR; + } + JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest, quality)) + + // clean up input structure for later usage + jpegr_uncompressed_struct p010_image = *p010_image_ptr; + if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; + if (!p010_image.chroma_data) { + uint16_t* data = reinterpret_cast(p010_image.data); + p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; + p010_image.chroma_stride = p010_image.luma_stride; + } + jpegr_uncompressed_struct yuv420_image = *yuv420_image_ptr; + if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width; + if (!yuv420_image.chroma_data) { + uint8_t* data = reinterpret_cast(yuv420_image.data); + yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height; + yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1; + } + + // gain map + ultrahdr_metadata_struct metadata; + metadata.version = kJpegrVersion; + jpegr_uncompressed_struct gainmap_image; + JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image)); + std::unique_ptr map_data; + map_data.reset(reinterpret_cast(gainmap_image.data)); + + // compress gain map + JpegEncoderHelper jpeg_enc_obj_gm; + JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); + jpegr_compressed_struct compressed_map; + compressed_map.data = jpeg_enc_obj_gm.getCompressedImagePtr(); + compressed_map.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + compressed_map.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; + + std::shared_ptr icc = + IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420_image.colorGamut); + + jpegr_uncompressed_struct yuv420_bt601_image = yuv420_image; + 
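// [Editorial note -- illustrative sketch, not part of this change.] The BT.601
// staging buffer allocated just below aligns its luma stride with
// ALIGNM(width, JpegEncoderHelper::kCompressBatchSize) because libjpeg's
// raw-data interface works on whole MCUs: for 4:2:0 input it takes 16 sample
// rows per jpeg_write_raw_data() call and 16-sample-wide MCUs across the row.
// Assuming the batch size is 16, the alignment is a plain round-up:

#include <cstddef>

constexpr size_t kBatch = 16;  // assumed value of JpegEncoderHelper::kCompressBatchSize
constexpr size_t alignUp(size_t v, size_t a) { return (v + a - 1) / a * a; }
static_assert(alignUp(1000, kBatch) == 1008, "1000-px-wide rows get 8 samples of padding");
static_assert(alignUp(1024, kBatch) == 1024, "already a multiple, no padding needed");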
unique_ptr yuv_420_bt601_data; + // Convert to bt601 YUV encoding for JPEG encode + if (yuv420_image.colorGamut != ULTRAHDR_COLORGAMUT_P3) { + const size_t yuv_420_bt601_luma_stride = + ALIGNM(yuv420_image.width, JpegEncoderHelper::kCompressBatchSize); + yuv_420_bt601_data = + make_unique(yuv_420_bt601_luma_stride * yuv420_image.height * 3 / 2); + yuv420_bt601_image.data = yuv_420_bt601_data.get(); + yuv420_bt601_image.colorGamut = yuv420_image.colorGamut; + yuv420_bt601_image.luma_stride = yuv_420_bt601_luma_stride; + uint8_t* data = reinterpret_cast(yuv420_bt601_image.data); + yuv420_bt601_image.chroma_data = data + yuv_420_bt601_luma_stride * yuv420_image.height; + yuv420_bt601_image.chroma_stride = yuv_420_bt601_luma_stride >> 1; + + { + // copy luma + uint8_t* y_dst = reinterpret_cast(yuv420_bt601_image.data); + uint8_t* y_src = reinterpret_cast(yuv420_image.data); + if (yuv420_bt601_image.luma_stride == yuv420_image.luma_stride) { + memcpy(y_dst, y_src, yuv420_bt601_image.luma_stride * yuv420_image.height); + } else { + for (size_t i = 0; i < yuv420_image.height; i++) { + memcpy(y_dst, y_src, yuv420_image.width); + if (yuv420_image.width != yuv420_bt601_image.luma_stride) { + memset(y_dst + yuv420_image.width, 0, + yuv420_bt601_image.luma_stride - yuv420_image.width); + } + y_dst += yuv420_bt601_image.luma_stride; + y_src += yuv420_image.luma_stride; + } + } + } + + if (yuv420_bt601_image.chroma_stride == yuv420_image.chroma_stride) { + // copy luma + uint8_t* ch_dst = reinterpret_cast(yuv420_bt601_image.chroma_data); + uint8_t* ch_src = reinterpret_cast(yuv420_image.chroma_data); + memcpy(ch_dst, ch_src, yuv420_bt601_image.chroma_stride * yuv420_image.height); + } else { + // copy cb & cr + uint8_t* cb_dst = reinterpret_cast(yuv420_bt601_image.chroma_data); + uint8_t* cb_src = reinterpret_cast(yuv420_image.chroma_data); + uint8_t* cr_dst = cb_dst + (yuv420_bt601_image.chroma_stride * yuv420_bt601_image.height / 2); + uint8_t* cr_src = cb_src + (yuv420_image.chroma_stride * yuv420_image.height / 2); + for (size_t i = 0; i < yuv420_image.height / 2; i++) { + memcpy(cb_dst, cb_src, yuv420_image.width / 2); + memcpy(cr_dst, cr_src, yuv420_image.width / 2); + if (yuv420_bt601_image.width / 2 != yuv420_bt601_image.chroma_stride) { + memset(cb_dst + yuv420_image.width / 2, 0, + yuv420_bt601_image.chroma_stride - yuv420_image.width / 2); + memset(cr_dst + yuv420_image.width / 2, 0, + yuv420_bt601_image.chroma_stride - yuv420_image.width / 2); + } + cb_dst += yuv420_bt601_image.chroma_stride; + cb_src += yuv420_image.chroma_stride; + cr_dst += yuv420_bt601_image.chroma_stride; + cr_src += yuv420_image.chroma_stride; + } + } + JPEGR_CHECK(convertYuv(&yuv420_bt601_image, yuv420_image.colorGamut, ULTRAHDR_COLORGAMUT_P3)); + } + + // compress 420 image + JpegEncoderHelper jpeg_enc_obj_yuv420; + if (!jpeg_enc_obj_yuv420.compressImage( + reinterpret_cast(yuv420_bt601_image.data), + reinterpret_cast(yuv420_bt601_image.chroma_data), yuv420_bt601_image.width, + yuv420_bt601_image.height, yuv420_bt601_image.luma_stride, + yuv420_bt601_image.chroma_stride, quality, icc->getData(), icc->getLength())) { + return ERROR_JPEGR_ENCODE_ERROR; + } + + jpegr_compressed_struct jpeg; + jpeg.data = jpeg_enc_obj_yuv420.getCompressedImagePtr(); + jpeg.length = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); + jpeg.maxLength = static_cast(jpeg_enc_obj_yuv420.getCompressedImageSize()); + jpeg.colorGamut = yuv420_image.colorGamut; + + // append gain map, no ICC since JPEG encode already did it + 
JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0, + &metadata, dest)); + return JPEGR_NO_ERROR; +} + +/* Encode API-2 */ +status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, + jr_uncompressed_ptr yuv420_image_ptr, + jr_compressed_ptr yuv420jpg_image_ptr, + ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest) { + // validate input arguments + if (yuv420_image_ptr == nullptr) { + ALOGE("received nullptr for uncompressed 420 image"); + return ERROR_JPEGR_BAD_PTR; + } + if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) { + ALOGE("received nullptr for compressed jpeg image"); + return ERROR_JPEGR_BAD_PTR; + } + JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest)) + + // clean up input structure for later usage + jpegr_uncompressed_struct p010_image = *p010_image_ptr; + if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; + if (!p010_image.chroma_data) { + uint16_t* data = reinterpret_cast(p010_image.data); + p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; + p010_image.chroma_stride = p010_image.luma_stride; + } + jpegr_uncompressed_struct yuv420_image = *yuv420_image_ptr; + if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width; + if (!yuv420_image.chroma_data) { + uint8_t* data = reinterpret_cast(yuv420_image.data); + yuv420_image.chroma_data = data + yuv420_image.luma_stride * p010_image.height; + yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1; + } + + // gain map + ultrahdr_metadata_struct metadata; + metadata.version = kJpegrVersion; + jpegr_uncompressed_struct gainmap_image; + JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image)); + std::unique_ptr map_data; + map_data.reset(reinterpret_cast(gainmap_image.data)); + + // compress gain map + JpegEncoderHelper jpeg_enc_obj_gm; + JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); + jpegr_compressed_struct gainmapjpg_image; + gainmapjpg_image.data = jpeg_enc_obj_gm.getCompressedImagePtr(); + gainmapjpg_image.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + gainmapjpg_image.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + gainmapjpg_image.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; + + return encodeJPEGR(yuv420jpg_image_ptr, &gainmapjpg_image, &metadata, dest); +} + +/* Encode API-3 */ +status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, + jr_compressed_ptr yuv420jpg_image_ptr, + ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest) { + // validate input arguments + if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) { + ALOGE("received nullptr for compressed jpeg image"); + return ERROR_JPEGR_BAD_PTR; + } + JPEGR_CHECK(areInputArgumentsValid(p010_image_ptr, nullptr, hdr_tf, dest)) + + // clean up input structure for later usage + jpegr_uncompressed_struct p010_image = *p010_image_ptr; + if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width; + if (!p010_image.chroma_data) { + uint16_t* data = reinterpret_cast(p010_image.data); + p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height; + p010_image.chroma_stride = p010_image.luma_stride; + } + + // decode input jpeg, gamut is going to be bt601. 
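// [Editorial note -- illustrative sketch, not part of this change.] The decoded
// buffer is interpreted below as planar 4:2:0: a full-resolution Y plane
// followed by quarter-resolution Cb and Cr planes, which is where the
// "chroma_data = data + luma_stride * height" arithmetic comes from.
// Yuv420PlanarView / makeView are hypothetical helpers, not library APIs.

#include <cstddef>
#include <cstdint>

struct Yuv420PlanarView {
  uint8_t* y;   // width * height luma samples
  uint8_t* cb;  // (width / 2) * (height / 2) samples
  uint8_t* cr;  // (width / 2) * (height / 2) samples
};

static Yuv420PlanarView makeView(uint8_t* base, size_t width, size_t height) {
  Yuv420PlanarView v;
  v.y = base;
  v.cb = base + width * height;              // Cb starts right after Y
  v.cr = v.cb + (width / 2) * (height / 2);  // Cr starts right after Cb
  return v;  // total footprint: width * height * 3 / 2 bytes
}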
+ JpegDecoderHelper jpeg_dec_obj_yuv420; + if (!jpeg_dec_obj_yuv420.decompressImage(yuv420jpg_image_ptr->data, + yuv420jpg_image_ptr->length)) { + return ERROR_JPEGR_DECODE_ERROR; + } + jpegr_uncompressed_struct yuv420_image{}; + yuv420_image.data = jpeg_dec_obj_yuv420.getDecompressedImagePtr(); + yuv420_image.width = jpeg_dec_obj_yuv420.getDecompressedImageWidth(); + yuv420_image.height = jpeg_dec_obj_yuv420.getDecompressedImageHeight(); + yuv420_image.colorGamut = yuv420jpg_image_ptr->colorGamut; + if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width; + if (!yuv420_image.chroma_data) { + uint8_t* data = reinterpret_cast(yuv420_image.data); + yuv420_image.chroma_data = data + yuv420_image.luma_stride * p010_image.height; + yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1; + } + + if (p010_image_ptr->width != yuv420_image.width || + p010_image_ptr->height != yuv420_image.height) { + return ERROR_JPEGR_RESOLUTION_MISMATCH; + } + + // gain map + ultrahdr_metadata_struct metadata; + metadata.version = kJpegrVersion; + jpegr_uncompressed_struct gainmap_image; + JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image, + true /* sdr_is_601 */)); + std::unique_ptr map_data; + map_data.reset(reinterpret_cast(gainmap_image.data)); + + // compress gain map + JpegEncoderHelper jpeg_enc_obj_gm; + JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm)); + jpegr_compressed_struct gainmapjpg_image; + gainmapjpg_image.data = jpeg_enc_obj_gm.getCompressedImagePtr(); + gainmapjpg_image.length = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + gainmapjpg_image.maxLength = static_cast(jpeg_enc_obj_gm.getCompressedImageSize()); + gainmapjpg_image.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; + + return encodeJPEGR(yuv420jpg_image_ptr, &gainmapjpg_image, &metadata, dest); +} + +/* Encode API-4 */ +status_t JpegR::encodeJPEGR(jr_compressed_ptr yuv420jpg_image_ptr, + jr_compressed_ptr gainmapjpg_image_ptr, ultrahdr_metadata_ptr metadata, + jr_compressed_ptr dest) { + if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) { + ALOGE("received nullptr for compressed jpeg image"); + return ERROR_JPEGR_BAD_PTR; + } + if (gainmapjpg_image_ptr == nullptr || gainmapjpg_image_ptr->data == nullptr) { + ALOGE("received nullptr for compressed gain map"); + return ERROR_JPEGR_BAD_PTR; + } + if (dest == nullptr || dest->data == nullptr) { + ALOGE("received nullptr for destination"); + return ERROR_JPEGR_BAD_PTR; + } + + // We just want to check if ICC is present, so don't do a full decode. Note, + // this doesn't verify that the ICC is valid. + JpegDecoderHelper decoder; + std::vector icc; + decoder.getCompressedImageParameters(yuv420jpg_image_ptr->data, yuv420jpg_image_ptr->length, + /* pWidth */ nullptr, /* pHeight */ nullptr, &icc, + /* exifData */ nullptr); + + // Add ICC if not already present. 
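// [Editorial note -- illustrative caller-side sketch, not part of this change.]
// For context, this API-4 entry point is normally fed two already compressed
// JPEGs plus gain-map metadata before the ICC handling below runs.
// rewrapAsUltraHdr is a hypothetical wrapper, the metadata values are examples
// only, and the output buffer is caller-owned; the sketch assumes the public
// headers already included by this file (ultrahdr/jpegr.h and ultrahdr/ultrahdr.h).

static status_t rewrapAsUltraHdr(ultrahdr::JpegR& codec, void* primaryJpeg, int primarySize,
                                 void* gainmapJpeg, int gainmapSize, void* outBuf,
                                 int outCapacity) {
  jpegr_compressed_struct primary{}, gainmap{}, out{};
  primary.data = primaryJpeg;
  primary.length = primary.maxLength = primarySize;
  primary.colorGamut = ULTRAHDR_COLORGAMUT_BT709;
  gainmap.data = gainmapJpeg;
  gainmap.length = gainmap.maxLength = gainmapSize;
  gainmap.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
  out.data = outBuf;  // caller-owned destination, must be large enough for the result
  out.maxLength = outCapacity;

  ultrahdr_metadata_struct md{};
  md.version = "1.0";         // assumed value of kJpegrVersion
  md.minContentBoost = 1.0f;
  md.maxContentBoost = 8.0f;  // example HDR/SDR luminance ratio
  md.gamma = 1.0f;
  md.offsetSdr = md.offsetHdr = 0.0f;
  md.hdrCapacityMin = md.minContentBoost;
  md.hdrCapacityMax = md.maxContentBoost;
  return codec.encodeJPEGR(&primary, &gainmap, &md, &out);
}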
+ if (icc.size() > 0) { + JPEGR_CHECK(appendGainMap(yuv420jpg_image_ptr, gainmapjpg_image_ptr, /* exif */ nullptr, + /* icc */ nullptr, /* icc size */ 0, metadata, dest)); + } else { + std::shared_ptr newIcc = + IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420jpg_image_ptr->colorGamut); + JPEGR_CHECK(appendGainMap(yuv420jpg_image_ptr, gainmapjpg_image_ptr, /* exif */ nullptr, + newIcc->getData(), newIcc->getLength(), metadata, dest)); + } + + return JPEGR_NO_ERROR; +} + +status_t JpegR::getJPEGRInfo(jr_compressed_ptr jpegr_image_ptr, jr_info_ptr jpeg_image_info_ptr) { + if (jpegr_image_ptr == nullptr || jpegr_image_ptr->data == nullptr) { + ALOGE("received nullptr for compressed jpegr image"); + return ERROR_JPEGR_BAD_PTR; + } + if (jpeg_image_info_ptr == nullptr) { + ALOGE("received nullptr for compressed jpegr info struct"); + return ERROR_JPEGR_BAD_PTR; + } + + jpegr_compressed_struct primary_image, gainmap_image; + status_t status = extractPrimaryImageAndGainMap(jpegr_image_ptr, &primary_image, &gainmap_image); + if (status != JPEGR_NO_ERROR && status != ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND) { + return status; + } + + JpegDecoderHelper jpeg_dec_obj_hdr; + if (!jpeg_dec_obj_hdr.getCompressedImageParameters( + primary_image.data, primary_image.length, &jpeg_image_info_ptr->width, + &jpeg_image_info_ptr->height, jpeg_image_info_ptr->iccData, + jpeg_image_info_ptr->exifData)) { + return ERROR_JPEGR_DECODE_ERROR; + } + + return status; +} + +/* Decode API */ +status_t JpegR::decodeJPEGR(jr_compressed_ptr jpegr_image_ptr, jr_uncompressed_ptr dest, + float max_display_boost, jr_exif_ptr exif, + ultrahdr_output_format output_format, + jr_uncompressed_ptr gainmap_image_ptr, ultrahdr_metadata_ptr metadata) { + if (jpegr_image_ptr == nullptr || jpegr_image_ptr->data == nullptr) { + ALOGE("received nullptr for compressed jpegr image"); + return ERROR_JPEGR_BAD_PTR; + } + if (dest == nullptr || dest->data == nullptr) { + ALOGE("received nullptr for dest image"); + return ERROR_JPEGR_BAD_PTR; + } + if (max_display_boost < 1.0f) { + ALOGE("received bad value for max_display_boost %f", max_display_boost); + return ERROR_JPEGR_INVALID_DISPLAY_BOOST; + } + if (exif != nullptr && exif->data == nullptr) { + ALOGE("received nullptr address for exif data"); + return ERROR_JPEGR_BAD_PTR; + } + if (output_format <= ULTRAHDR_OUTPUT_UNSPECIFIED || output_format > ULTRAHDR_OUTPUT_MAX) { + ALOGE("received bad value for output format %d", output_format); + return ERROR_JPEGR_INVALID_OUTPUT_FORMAT; + } + + jpegr_compressed_struct primary_jpeg_image, gainmap_jpeg_image; + status_t status = + extractPrimaryImageAndGainMap(jpegr_image_ptr, &primary_jpeg_image, &gainmap_jpeg_image); + if (status != JPEGR_NO_ERROR) { + if (output_format != ULTRAHDR_OUTPUT_SDR || status != ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND) { + ALOGE("received invalid compressed jpegr image"); + return status; + } + } + + JpegDecoderHelper jpeg_dec_obj_yuv420; + if (!jpeg_dec_obj_yuv420.decompressImage(primary_jpeg_image.data, primary_jpeg_image.length, + (output_format == ULTRAHDR_OUTPUT_SDR))) { + return ERROR_JPEGR_DECODE_ERROR; + } + + if (output_format == ULTRAHDR_OUTPUT_SDR) { + if ((jpeg_dec_obj_yuv420.getDecompressedImageWidth() * + jpeg_dec_obj_yuv420.getDecompressedImageHeight() * 4) > + jpeg_dec_obj_yuv420.getDecompressedImageSize()) { + return ERROR_JPEGR_DECODE_ERROR; + } + } else { + if ((jpeg_dec_obj_yuv420.getDecompressedImageWidth() * + jpeg_dec_obj_yuv420.getDecompressedImageHeight() * 3 / 2) > + 
jpeg_dec_obj_yuv420.getDecompressedImageSize()) { + return ERROR_JPEGR_DECODE_ERROR; + } + } + + if (exif != nullptr) { + if (exif->data == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + if (exif->length < jpeg_dec_obj_yuv420.getEXIFSize()) { + return ERROR_JPEGR_BUFFER_TOO_SMALL; + } + memcpy(exif->data, jpeg_dec_obj_yuv420.getEXIFPtr(), jpeg_dec_obj_yuv420.getEXIFSize()); + exif->length = jpeg_dec_obj_yuv420.getEXIFSize(); + } + + if (output_format == ULTRAHDR_OUTPUT_SDR) { + dest->width = jpeg_dec_obj_yuv420.getDecompressedImageWidth(); + dest->height = jpeg_dec_obj_yuv420.getDecompressedImageHeight(); + memcpy(dest->data, jpeg_dec_obj_yuv420.getDecompressedImagePtr(), + dest->width * dest->height * 4); + return JPEGR_NO_ERROR; + } + + JpegDecoderHelper jpeg_dec_obj_gm; + if (!jpeg_dec_obj_gm.decompressImage(gainmap_jpeg_image.data, gainmap_jpeg_image.length)) { + return ERROR_JPEGR_DECODE_ERROR; + } + if ((jpeg_dec_obj_gm.getDecompressedImageWidth() * jpeg_dec_obj_gm.getDecompressedImageHeight()) > + jpeg_dec_obj_gm.getDecompressedImageSize()) { + return ERROR_JPEGR_DECODE_ERROR; + } + + jpegr_uncompressed_struct gainmap_image; + gainmap_image.data = jpeg_dec_obj_gm.getDecompressedImagePtr(); + gainmap_image.width = jpeg_dec_obj_gm.getDecompressedImageWidth(); + gainmap_image.height = jpeg_dec_obj_gm.getDecompressedImageHeight(); + + if (gainmap_image_ptr != nullptr) { + gainmap_image_ptr->width = gainmap_image.width; + gainmap_image_ptr->height = gainmap_image.height; + int size = gainmap_image_ptr->width * gainmap_image_ptr->height; + gainmap_image_ptr->data = malloc(size); + memcpy(gainmap_image_ptr->data, gainmap_image.data, size); + } + + ultrahdr_metadata_struct uhdr_metadata; + if (!getMetadataFromXMP(static_cast(jpeg_dec_obj_gm.getXMPPtr()), + jpeg_dec_obj_gm.getXMPSize(), &uhdr_metadata)) { + return ERROR_JPEGR_METADATA_ERROR; + } + + if (metadata != nullptr) { + metadata->version = uhdr_metadata.version; + metadata->minContentBoost = uhdr_metadata.minContentBoost; + metadata->maxContentBoost = uhdr_metadata.maxContentBoost; + metadata->gamma = uhdr_metadata.gamma; + metadata->offsetSdr = uhdr_metadata.offsetSdr; + metadata->offsetHdr = uhdr_metadata.offsetHdr; + metadata->hdrCapacityMin = uhdr_metadata.hdrCapacityMin; + metadata->hdrCapacityMax = uhdr_metadata.hdrCapacityMax; + } + + jpegr_uncompressed_struct yuv420_image; + yuv420_image.data = jpeg_dec_obj_yuv420.getDecompressedImagePtr(); + yuv420_image.width = jpeg_dec_obj_yuv420.getDecompressedImageWidth(); + yuv420_image.height = jpeg_dec_obj_yuv420.getDecompressedImageHeight(); + yuv420_image.colorGamut = IccHelper::readIccColorGamut(jpeg_dec_obj_yuv420.getICCPtr(), + jpeg_dec_obj_yuv420.getICCSize()); + yuv420_image.luma_stride = yuv420_image.width; + uint8_t* data = reinterpret_cast(yuv420_image.data); + yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height; + yuv420_image.chroma_stride = yuv420_image.width >> 1; + + JPEGR_CHECK(applyGainMap(&yuv420_image, &gainmap_image, &uhdr_metadata, output_format, + max_display_boost, dest)); + return JPEGR_NO_ERROR; +} + +status_t JpegR::compressGainMap(jr_uncompressed_ptr gainmap_image_ptr, + JpegEncoderHelper* jpeg_enc_obj_ptr) { + if (gainmap_image_ptr == nullptr || jpeg_enc_obj_ptr == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + + // Don't need to convert YUV to Bt601 since single channel + if (!jpeg_enc_obj_ptr->compressImage(reinterpret_cast(gainmap_image_ptr->data), nullptr, + gainmap_image_ptr->width, gainmap_image_ptr->height, + 
gainmap_image_ptr->luma_stride, 0, kMapCompressQuality, + nullptr, 0)) { + return ERROR_JPEGR_ENCODE_ERROR; + } + + return JPEGR_NO_ERROR; +} + +const int kJobSzInRows = 16; +static_assert(kJobSzInRows > 0 && kJobSzInRows % kMapDimensionScaleFactor == 0, + "align job size to kMapDimensionScaleFactor"); + +class JobQueue { + public: + bool dequeueJob(size_t& rowStart, size_t& rowEnd); + void enqueueJob(size_t rowStart, size_t rowEnd); + void markQueueForEnd(); + void reset(); + + private: + bool mQueuedAllJobs = false; + std::deque> mJobs; + std::mutex mMutex; + std::condition_variable mCv; +}; + +bool JobQueue::dequeueJob(size_t& rowStart, size_t& rowEnd) { + std::unique_lock lock{mMutex}; + while (true) { + if (mJobs.empty()) { + if (mQueuedAllJobs) { + return false; + } else { + mCv.wait_for(lock, std::chrono::milliseconds(100)); + } + } else { + auto it = mJobs.begin(); + rowStart = std::get<0>(*it); + rowEnd = std::get<1>(*it); + mJobs.erase(it); + return true; + } + } + return false; +} + +void JobQueue::enqueueJob(size_t rowStart, size_t rowEnd) { + std::unique_lock lock{mMutex}; + mJobs.push_back(std::make_tuple(rowStart, rowEnd)); + lock.unlock(); + mCv.notify_one(); +} + +void JobQueue::markQueueForEnd() { + std::unique_lock lock{mMutex}; + mQueuedAllJobs = true; + lock.unlock(); + mCv.notify_all(); +} + +void JobQueue::reset() { + std::unique_lock lock{mMutex}; + mJobs.clear(); + mQueuedAllJobs = false; +} + +status_t JpegR::generateGainMap(jr_uncompressed_ptr yuv420_image_ptr, + jr_uncompressed_ptr p010_image_ptr, + ultrahdr_transfer_function hdr_tf, ultrahdr_metadata_ptr metadata, + jr_uncompressed_ptr dest, bool sdr_is_601) { + if (yuv420_image_ptr == nullptr || p010_image_ptr == nullptr || metadata == nullptr || + dest == nullptr || yuv420_image_ptr->data == nullptr || + yuv420_image_ptr->chroma_data == nullptr || p010_image_ptr->data == nullptr || + p010_image_ptr->chroma_data == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + if (yuv420_image_ptr->width != p010_image_ptr->width || + yuv420_image_ptr->height != p010_image_ptr->height) { + return ERROR_JPEGR_RESOLUTION_MISMATCH; + } + if (yuv420_image_ptr->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED || + p010_image_ptr->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED) { + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + + size_t image_width = yuv420_image_ptr->width; + size_t image_height = yuv420_image_ptr->height; + size_t map_width = image_width / kMapDimensionScaleFactor; + size_t map_height = image_height / kMapDimensionScaleFactor; + + dest->data = new uint8_t[map_width * map_height]; + dest->width = map_width; + dest->height = map_height; + dest->colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; + dest->luma_stride = map_width; + dest->chroma_data = nullptr; + dest->chroma_stride = 0; + std::unique_ptr map_data; + map_data.reset(reinterpret_cast(dest->data)); + + ColorTransformFn hdrInvOetf = nullptr; + float hdr_white_nits; + switch (hdr_tf) { + case ULTRAHDR_TF_LINEAR: + hdrInvOetf = identityConversion; + // Note: this will produce clipping if the input exceeds kHlgMaxNits. + // TODO: TF LINEAR will be deprecated. 
+ hdr_white_nits = kHlgMaxNits; + break; + case ULTRAHDR_TF_HLG: +#if USE_HLG_INVOETF_LUT + hdrInvOetf = hlgInvOetfLUT; +#else + hdrInvOetf = hlgInvOetf; +#endif + hdr_white_nits = kHlgMaxNits; + break; + case ULTRAHDR_TF_PQ: +#if USE_PQ_INVOETF_LUT + hdrInvOetf = pqInvOetfLUT; +#else + hdrInvOetf = pqInvOetf; +#endif + hdr_white_nits = kPqMaxNits; + break; + default: + // Should be impossible to hit after input validation. + return ERROR_JPEGR_INVALID_TRANS_FUNC; + } + + metadata->maxContentBoost = hdr_white_nits / kSdrWhiteNits; + metadata->minContentBoost = 1.0f; + metadata->gamma = 1.0f; + metadata->offsetSdr = 0.0f; + metadata->offsetHdr = 0.0f; + metadata->hdrCapacityMin = 1.0f; + metadata->hdrCapacityMax = metadata->maxContentBoost; + + float log2MinBoost = log2(metadata->minContentBoost); + float log2MaxBoost = log2(metadata->maxContentBoost); + + ColorTransformFn hdrGamutConversionFn = + getHdrConversionFn(yuv420_image_ptr->colorGamut, p010_image_ptr->colorGamut); + + ColorCalculationFn luminanceFn = nullptr; + ColorTransformFn sdrYuvToRgbFn = nullptr; + switch (yuv420_image_ptr->colorGamut) { + case ULTRAHDR_COLORGAMUT_BT709: + luminanceFn = srgbLuminance; + sdrYuvToRgbFn = srgbYuvToRgb; + break; + case ULTRAHDR_COLORGAMUT_P3: + luminanceFn = p3Luminance; + sdrYuvToRgbFn = p3YuvToRgb; + break; + case ULTRAHDR_COLORGAMUT_BT2100: + luminanceFn = bt2100Luminance; + sdrYuvToRgbFn = bt2100YuvToRgb; + break; + case ULTRAHDR_COLORGAMUT_UNSPECIFIED: + // Should be impossible to hit after input validation. + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + if (sdr_is_601) { + sdrYuvToRgbFn = p3YuvToRgb; + } + + ColorTransformFn hdrYuvToRgbFn = nullptr; + switch (p010_image_ptr->colorGamut) { + case ULTRAHDR_COLORGAMUT_BT709: + hdrYuvToRgbFn = srgbYuvToRgb; + break; + case ULTRAHDR_COLORGAMUT_P3: + hdrYuvToRgbFn = p3YuvToRgb; + break; + case ULTRAHDR_COLORGAMUT_BT2100: + hdrYuvToRgbFn = bt2100YuvToRgb; + break; + case ULTRAHDR_COLORGAMUT_UNSPECIFIED: + // Should be impossible to hit after input validation. + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + + const int threads = (std::min)(GetCPUCoreCount(), 4); + size_t rowStep = threads == 1 ? image_height : kJobSzInRows; + JobQueue jobQueue; + + std::function generateMap = [yuv420_image_ptr, p010_image_ptr, metadata, dest, hdrInvOetf, + hdrGamutConversionFn, luminanceFn, sdrYuvToRgbFn, + hdrYuvToRgbFn, hdr_white_nits, log2MinBoost, log2MaxBoost, + &jobQueue]() -> void { + size_t rowStart, rowEnd; + while (jobQueue.dequeueJob(rowStart, rowEnd)) { + for (size_t y = rowStart; y < rowEnd; ++y) { + for (size_t x = 0; x < dest->width; ++x) { + Color sdr_yuv_gamma = sampleYuv420(yuv420_image_ptr, kMapDimensionScaleFactor, x, y); + Color sdr_rgb_gamma = sdrYuvToRgbFn(sdr_yuv_gamma); + // We are assuming the SDR input is always sRGB transfer. 
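// [Editorial note -- illustrative sketch, not part of this change.] The sRGB
// inverse OETF selected just below (srgbInvOetf / srgbInvOetfLUT) is the
// standard IEC 61966-2-1 curve applied per channel; the LUT flavor is a sampled
// approximation of the same curve. srgbInvOetfChannel is a hypothetical helper
// (needs <cmath>):

static float srgbInvOetfChannel(float e) {
  return e <= 0.04045f ? e / 12.92f : std::pow((e + 0.055f) / 1.055f, 2.4f);
}

// The gain-map sample written a few lines further down is, roughly,
//   encodeGain(sdr, hdr) = 255 * (log2(clamp(hdr / sdr)) - log2MinBoost)
//                              / (log2MaxBoost - log2MinBoost)
// i.e. the log2 luminance ratio, clamped to the metadata boost range and
// linearly quantized to 8 bits.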
+#if USE_SRGB_INVOETF_LUT + Color sdr_rgb = srgbInvOetfLUT(sdr_rgb_gamma); +#else + Color sdr_rgb = srgbInvOetf(sdr_rgb_gamma); +#endif + float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits; + + Color hdr_yuv_gamma = sampleP010(p010_image_ptr, kMapDimensionScaleFactor, x, y); + Color hdr_rgb_gamma = hdrYuvToRgbFn(hdr_yuv_gamma); + Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma); + hdr_rgb = hdrGamutConversionFn(hdr_rgb); + float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits; + + size_t pixel_idx = x + y * dest->width; + reinterpret_cast(dest->data)[pixel_idx] = + encodeGain(sdr_y_nits, hdr_y_nits, metadata, log2MinBoost, log2MaxBoost); + } + } + } + }; + + // generate map + std::vector workers; + for (int th = 0; th < threads - 1; th++) { + workers.push_back(std::thread(generateMap)); + } + + rowStep = (threads == 1 ? image_height : kJobSzInRows) / kMapDimensionScaleFactor; + for (size_t rowStart = 0; rowStart < map_height;) { + size_t rowEnd = (std::min)(rowStart + rowStep, map_height); + jobQueue.enqueueJob(rowStart, rowEnd); + rowStart = rowEnd; + } + jobQueue.markQueueForEnd(); + generateMap(); + std::for_each(workers.begin(), workers.end(), [](std::thread& t) { t.join(); }); + + map_data.release(); + return JPEGR_NO_ERROR; +} + +status_t JpegR::applyGainMap(jr_uncompressed_ptr yuv420_image_ptr, + jr_uncompressed_ptr gainmap_image_ptr, ultrahdr_metadata_ptr metadata, + ultrahdr_output_format output_format, float max_display_boost, + jr_uncompressed_ptr dest) { + if (yuv420_image_ptr == nullptr || gainmap_image_ptr == nullptr || metadata == nullptr || + dest == nullptr || yuv420_image_ptr->data == nullptr || + yuv420_image_ptr->chroma_data == nullptr || gainmap_image_ptr->data == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + if (metadata->version.compare(kJpegrVersion)) { + ALOGE("Unsupported metadata version: %s", metadata->version.c_str()); + return ERROR_JPEGR_BAD_METADATA; + } + if (metadata->gamma != 1.0f) { + ALOGE("Unsupported metadata gamma: %f", metadata->gamma); + return ERROR_JPEGR_BAD_METADATA; + } + if (metadata->offsetSdr != 0.0f || metadata->offsetHdr != 0.0f) { + ALOGE("Unsupported metadata offset sdr, hdr: %f, %f", metadata->offsetSdr, metadata->offsetHdr); + return ERROR_JPEGR_BAD_METADATA; + } + if (metadata->hdrCapacityMin != metadata->minContentBoost || + metadata->hdrCapacityMax != metadata->maxContentBoost) { + ALOGE("Unsupported metadata hdr capacity min, max: %f, %f", metadata->hdrCapacityMin, + metadata->hdrCapacityMax); + return ERROR_JPEGR_BAD_METADATA; + } + + // TODO: remove once map scaling factor is computed based on actual map dims + size_t image_width = yuv420_image_ptr->width; + size_t image_height = yuv420_image_ptr->height; + size_t map_width = image_width / kMapDimensionScaleFactor; + size_t map_height = image_height / kMapDimensionScaleFactor; + if (map_width != gainmap_image_ptr->width || map_height != gainmap_image_ptr->height) { + ALOGE( + "gain map dimensions and primary image dimensions are not to scale, computed gain map " + "resolution is %zux%zu, received gain map resolution is %zux%zu", + map_width, map_height, gainmap_image_ptr->width, gainmap_image_ptr->height); + return ERROR_JPEGR_RESOLUTION_MISMATCH; + } + + dest->width = yuv420_image_ptr->width; + dest->height = yuv420_image_ptr->height; + ShepardsIDW idwTable(kMapDimensionScaleFactor); + float display_boost = (std::min)(max_display_boost, metadata->maxContentBoost); + GainLUT gainLUT(metadata, display_boost); + + JobQueue jobQueue; + std::function applyRecMap = 
[yuv420_image_ptr, gainmap_image_ptr, dest, &jobQueue, + &idwTable, output_format, &gainLUT, + display_boost]() -> void { + size_t width = yuv420_image_ptr->width; + + size_t rowStart, rowEnd; + while (jobQueue.dequeueJob(rowStart, rowEnd)) { + for (size_t y = rowStart; y < rowEnd; ++y) { + for (size_t x = 0; x < width; ++x) { + Color yuv_gamma_sdr = getYuv420Pixel(yuv420_image_ptr, x, y); + // Assuming the sdr image is a decoded JPEG, we should always use Rec.601 YUV coefficients + Color rgb_gamma_sdr = p3YuvToRgb(yuv_gamma_sdr); + // We are assuming the SDR base image is always sRGB transfer. +#if USE_SRGB_INVOETF_LUT + Color rgb_sdr = srgbInvOetfLUT(rgb_gamma_sdr); +#else + Color rgb_sdr = srgbInvOetf(rgb_gamma_sdr); +#endif + float gain; + // TODO: determine map scaling factor based on actual map dims + size_t map_scale_factor = kMapDimensionScaleFactor; + // TODO: If map_scale_factor is guaranteed to be an integer, then remove the following. + // Currently map_scale_factor is of type size_t, but it could be changed to a float + // later. + if (map_scale_factor != floorf(map_scale_factor)) { + gain = sampleMap(gainmap_image_ptr, map_scale_factor, x, y); + } else { + gain = sampleMap(gainmap_image_ptr, map_scale_factor, x, y, idwTable); + } + +#if USE_APPLY_GAIN_LUT + Color rgb_hdr = applyGainLUT(rgb_sdr, gain, gainLUT); +#else + Color rgb_hdr = applyGain(rgb_sdr, gain, metadata, display_boost); +#endif + rgb_hdr = rgb_hdr / display_boost; + size_t pixel_idx = x + y * width; + + switch (output_format) { + case ULTRAHDR_OUTPUT_HDR_LINEAR: { + uint64_t rgba_f16 = colorToRgbaF16(rgb_hdr); + reinterpret_cast(dest->data)[pixel_idx] = rgba_f16; + break; + } + case ULTRAHDR_OUTPUT_HDR_HLG: { +#if USE_HLG_OETF_LUT + ColorTransformFn hdrOetf = hlgOetfLUT; +#else + ColorTransformFn hdrOetf = hlgOetf; +#endif + Color rgb_gamma_hdr = hdrOetf(rgb_hdr); + uint32_t rgba_1010102 = colorToRgba1010102(rgb_gamma_hdr); + reinterpret_cast(dest->data)[pixel_idx] = rgba_1010102; + break; + } + case ULTRAHDR_OUTPUT_HDR_PQ: { +#if USE_PQ_OETF_LUT + ColorTransformFn hdrOetf = pqOetfLUT; +#else + ColorTransformFn hdrOetf = pqOetf; +#endif + Color rgb_gamma_hdr = hdrOetf(rgb_hdr); + uint32_t rgba_1010102 = colorToRgba1010102(rgb_gamma_hdr); + reinterpret_cast(dest->data)[pixel_idx] = rgba_1010102; + break; + } + default: { + } + // Should be impossible to hit after input validation. + } + } + } + } + }; + + const int threads = (std::min)(GetCPUCoreCount(), 4); + std::vector workers; + for (int th = 0; th < threads - 1; th++) { + workers.push_back(std::thread(applyRecMap)); + } + const int rowStep = threads == 1 ? 
yuv420_image_ptr->height : kJobSzInRows; + for (size_t rowStart = 0; rowStart < yuv420_image_ptr->height;) { + int rowEnd = (std::min)(rowStart + rowStep, yuv420_image_ptr->height); + jobQueue.enqueueJob(rowStart, rowEnd); + rowStart = rowEnd; + } + jobQueue.markQueueForEnd(); + applyRecMap(); + std::for_each(workers.begin(), workers.end(), [](std::thread& t) { t.join(); }); + return JPEGR_NO_ERROR; +} + +status_t JpegR::extractPrimaryImageAndGainMap(jr_compressed_ptr jpegr_image_ptr, + jr_compressed_ptr primary_jpg_image_ptr, + jr_compressed_ptr gainmap_jpg_image_ptr) { + if (jpegr_image_ptr == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + + MessageHandler msg_handler; + msg_handler.SetMessageWriter(make_unique(AlogMessageWriter())); + std::shared_ptr seg = DataSegment::Create( + DataRange(0, jpegr_image_ptr->length), static_cast(jpegr_image_ptr->data), + DataSegment::BufferDispositionPolicy::kDontDelete); + DataSegmentDataSource data_source(seg); + JpegInfoBuilder jpeg_info_builder; + jpeg_info_builder.SetImageLimit(2); + JpegScanner jpeg_scanner(&msg_handler); + jpeg_scanner.Run(&data_source, &jpeg_info_builder); + data_source.Reset(); + + if (jpeg_scanner.HasError()) { + return JPEGR_UNKNOWN_ERROR; + } + + const auto& jpeg_info = jpeg_info_builder.GetInfo(); + const auto& image_ranges = jpeg_info.GetImageRanges(); + + if (image_ranges.empty()) { + return ERROR_JPEGR_NO_IMAGES_FOUND; + } + + if (primary_jpg_image_ptr != nullptr) { + primary_jpg_image_ptr->data = + static_cast(jpegr_image_ptr->data) + image_ranges[0].GetBegin(); + primary_jpg_image_ptr->length = image_ranges[0].GetLength(); + } + + if (image_ranges.size() == 1) { + return ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND; + } + + if (gainmap_jpg_image_ptr != nullptr) { + gainmap_jpg_image_ptr->data = + static_cast(jpegr_image_ptr->data) + image_ranges[1].GetBegin(); + gainmap_jpg_image_ptr->length = image_ranges[1].GetLength(); + } + + // TODO: choose primary image and gain map image carefully + if (image_ranges.size() > 2) { + ALOGW("Number of jpeg images present %d, primary, gain map images may not be correctly chosen", + (int)image_ranges.size()); + } + + return JPEGR_NO_ERROR; +} + +// JPEG/R structure: +// SOI (ff d8) +// +// (Optional, if EXIF package is from outside (Encode API-0 API-1), or if EXIF package presents +// in the JPEG input (Encode API-2, API-3, API-4)) +// APP1 (ff e1) +// 2 bytes of length (2 + length of exif package) +// EXIF package (this includes the first two bytes representing the package length) +// +// (Required, XMP package) APP1 (ff e1) +// 2 bytes of length (2 + 29 + length of xmp package) +// name space ("http://ns.adobe.com/xap/1.0/\0") +// XMP +// +// (Required, MPF package) APP2 (ff e2) +// 2 bytes of length +// MPF +// +// (Required) primary image (without the first two bytes (SOI) and EXIF, may have other packages) +// +// SOI (ff d8) +// +// (Required, XMP package) APP1 (ff e1) +// 2 bytes of length (2 + 29 + length of xmp package) +// name space ("http://ns.adobe.com/xap/1.0/\0") +// XMP +// +// (Required) secondary image (the gain map, without the first two bytes (SOI)) +// +// Metadata versions we are using: +// ECMA TR-98 for JFIF marker +// Exif 2.2 spec for EXIF marker +// Adobe XMP spec part 3 for XMP marker +// ICC v4.3 spec for ICC +status_t JpegR::appendGainMap(jr_compressed_ptr primary_jpg_image_ptr, + jr_compressed_ptr gainmap_jpg_image_ptr, jr_exif_ptr pExif, + void* pIcc, size_t icc_size, ultrahdr_metadata_ptr metadata, + jr_compressed_ptr dest) { + if (primary_jpg_image_ptr == 
nullptr || gainmap_jpg_image_ptr == nullptr || metadata == nullptr || + dest == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + if (metadata->version.compare("1.0")) { + ALOGE("received bad value for version: %s", metadata->version.c_str()); + return ERROR_JPEGR_BAD_METADATA; + } + if (metadata->maxContentBoost < metadata->minContentBoost) { + ALOGE("received bad value for content boost min %f, max %f", metadata->minContentBoost, + metadata->maxContentBoost); + return ERROR_JPEGR_BAD_METADATA; + } + if (metadata->hdrCapacityMax < metadata->hdrCapacityMin || metadata->hdrCapacityMin < 1.0f) { + ALOGE("received bad value for hdr capacity min %f, max %f", metadata->hdrCapacityMin, + metadata->hdrCapacityMax); + return ERROR_JPEGR_BAD_METADATA; + } + if (metadata->offsetSdr < 0.0f || metadata->offsetHdr < 0.0f) { + ALOGE("received bad value for offset sdr %f, hdr %f", metadata->offsetSdr, metadata->offsetHdr); + return ERROR_JPEGR_BAD_METADATA; + } + if (metadata->gamma <= 0.0f) { + ALOGE("received bad value for gamma %f", metadata->gamma); + return ERROR_JPEGR_BAD_METADATA; + } + + const string nameSpace = "http://ns.adobe.com/xap/1.0/"; + const int nameSpaceLength = nameSpace.size() + 1; // need to count the null terminator + + // calculate secondary image length first, because the length will be written into the primary + // image xmp + const string xmp_secondary = generateXmpForSecondaryImage(*metadata); + // xmp_secondary_length = 2 bytes representing the length of the package + + // + nameSpaceLength = 29 bytes length + // + length of xmp packet = xmp_secondary.size() + const int xmp_secondary_length = 2 + nameSpaceLength + xmp_secondary.size(); + const int secondary_image_size = 2 /* 2 bytes length of APP1 sign */ + + xmp_secondary_length + gainmap_jpg_image_ptr->length; + // primary image + const string xmp_primary = generateXmpForPrimaryImage(secondary_image_size, *metadata); + // same as primary + const int xmp_primary_length = 2 + nameSpaceLength + xmp_primary.size(); + + // Check if EXIF package presents in the JPEG input. + // If so, extract and remove the EXIF package. + JpegDecoderHelper decoder; + if (!decoder.extractEXIF(primary_jpg_image_ptr->data, primary_jpg_image_ptr->length)) { + return ERROR_JPEGR_DECODE_ERROR; + } + jpegr_exif_struct exif_from_jpg; + exif_from_jpg.data = nullptr; + exif_from_jpg.length = 0; + jpegr_compressed_struct new_jpg_image; + new_jpg_image.data = nullptr; + new_jpg_image.length = 0; + new_jpg_image.maxLength = 0; + new_jpg_image.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED; + std::unique_ptr dest_data; + if (decoder.getEXIFPos() >= 0) { + if (pExif != nullptr) { + ALOGE("received EXIF from outside while the primary image already contains EXIF"); + return ERROR_JPEGR_MULTIPLE_EXIFS_RECEIVED; + } + copyJpegWithoutExif(&new_jpg_image, primary_jpg_image_ptr, decoder.getEXIFPos(), + decoder.getEXIFSize()); + dest_data.reset(reinterpret_cast(new_jpg_image.data)); + exif_from_jpg.data = decoder.getEXIFPtr(); + exif_from_jpg.length = decoder.getEXIFSize(); + pExif = &exif_from_jpg; + } + + jr_compressed_ptr final_primary_jpg_image_ptr = + new_jpg_image.length == 0 ? 
primary_jpg_image_ptr : &new_jpg_image; + + int pos = 0; + // Begin primary image + // Write SOI + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kSOI, 1, pos)); + + // Write EXIF + if (pExif != nullptr) { + const int length = 2 + pExif->length; + const uint8_t lengthH = ((length >> 8) & 0xff); + const uint8_t lengthL = (length & 0xff); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); + JPEGR_CHECK(Write(dest, pExif->data, pExif->length, pos)); + } + + // Prepare and write XMP + { + const int length = xmp_primary_length; + const uint8_t lengthH = ((length >> 8) & 0xff); + const uint8_t lengthL = (length & 0xff); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); + JPEGR_CHECK(Write(dest, (void*)nameSpace.c_str(), nameSpaceLength, pos)); + JPEGR_CHECK(Write(dest, (void*)xmp_primary.c_str(), xmp_primary.size(), pos)); + } + + // Write ICC + if (pIcc != nullptr && icc_size > 0) { + const int length = icc_size + 2; + const uint8_t lengthH = ((length >> 8) & 0xff); + const uint8_t lengthL = (length & 0xff); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); + JPEGR_CHECK(Write(dest, pIcc, icc_size, pos)); + } + + // Prepare and write MPF + { + const int length = 2 + calculateMpfSize(); + const uint8_t lengthH = ((length >> 8) & 0xff); + const uint8_t lengthL = (length & 0xff); + int primary_image_size = pos + length + final_primary_jpg_image_ptr->length; + // between APP2 + package size + signature + // ff e2 00 58 4d 50 46 00 + // 2 + 2 + 4 = 8 (bytes) + // and ff d8 sign of the secondary image + int secondary_image_offset = primary_image_size - pos - 8; + std::shared_ptr mpf = generateMpf(primary_image_size, 0, /* primary_image_offset */ + secondary_image_size, secondary_image_offset); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); + JPEGR_CHECK(Write(dest, (void*)mpf->getData(), mpf->getLength(), pos)); + } + + // Write primary image + JPEGR_CHECK(Write(dest, (uint8_t*)final_primary_jpg_image_ptr->data + 2, + final_primary_jpg_image_ptr->length - 2, pos)); + // Finish primary image + + // Begin secondary image (gain map) + // Write SOI + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kSOI, 1, pos)); + + // Prepare and write XMP + { + const int length = xmp_secondary_length; + const uint8_t lengthH = ((length >> 8) & 0xff); + const uint8_t lengthL = (length & 0xff); + JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos)); + JPEGR_CHECK(Write(dest, 
&photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthH, 1, pos)); + JPEGR_CHECK(Write(dest, &lengthL, 1, pos)); + JPEGR_CHECK(Write(dest, (void*)nameSpace.c_str(), nameSpaceLength, pos)); + JPEGR_CHECK(Write(dest, (void*)xmp_secondary.c_str(), xmp_secondary.size(), pos)); + } + + // Write secondary image + JPEGR_CHECK(Write(dest, (uint8_t*)gainmap_jpg_image_ptr->data + 2, + gainmap_jpg_image_ptr->length - 2, pos)); + + // Set back length + dest->length = pos; + + // Done! + return JPEGR_NO_ERROR; +} + +status_t JpegR::toneMap(jr_uncompressed_ptr src, jr_uncompressed_ptr dest) { + if (src == nullptr || dest == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + if (src->width != dest->width || src->height != dest->height) { + return ERROR_JPEGR_RESOLUTION_MISMATCH; + } + uint16_t* src_y_data = reinterpret_cast(src->data); + uint8_t* dst_y_data = reinterpret_cast(dest->data); + for (size_t y = 0; y < src->height; ++y) { + uint16_t* src_y_row = src_y_data + y * src->luma_stride; + uint8_t* dst_y_row = dst_y_data + y * dest->luma_stride; + for (size_t x = 0; x < src->width; ++x) { + uint16_t y_uint = src_y_row[x] >> 6; + dst_y_row[x] = static_cast((y_uint >> 2) & 0xff); + } + if (dest->width != dest->luma_stride) { + memset(dst_y_row + dest->width, 0, dest->luma_stride - dest->width); + } + } + uint16_t* src_uv_data = reinterpret_cast(src->chroma_data); + uint8_t* dst_u_data = reinterpret_cast(dest->chroma_data); + size_t dst_v_offset = (dest->chroma_stride * dest->height / 2); + uint8_t* dst_v_data = dst_u_data + dst_v_offset; + for (size_t y = 0; y < src->height / 2; ++y) { + uint16_t* src_uv_row = src_uv_data + y * src->chroma_stride; + uint8_t* dst_u_row = dst_u_data + y * dest->chroma_stride; + uint8_t* dst_v_row = dst_v_data + y * dest->chroma_stride; + for (size_t x = 0; x < src->width / 2; ++x) { + uint16_t u_uint = src_uv_row[x << 1] >> 6; + uint16_t v_uint = src_uv_row[(x << 1) + 1] >> 6; + dst_u_row[x] = static_cast((u_uint >> 2) & 0xff); + dst_v_row[x] = static_cast((v_uint >> 2) & 0xff); + } + if (dest->width / 2 != dest->chroma_stride) { + memset(dst_u_row + dest->width / 2, 0, dest->chroma_stride - dest->width / 2); + memset(dst_v_row + dest->width / 2, 0, dest->chroma_stride - dest->width / 2); + } + } + dest->colorGamut = src->colorGamut; + return JPEGR_NO_ERROR; +} + +status_t JpegR::convertYuv(jr_uncompressed_ptr image, ultrahdr_color_gamut src_encoding, + ultrahdr_color_gamut dest_encoding) { + if (image == nullptr) { + return ERROR_JPEGR_BAD_PTR; + } + if (src_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED || + dest_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED) { + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + + ColorTransformFn conversionFn = nullptr; + switch (src_encoding) { + case ULTRAHDR_COLORGAMUT_BT709: + switch (dest_encoding) { + case ULTRAHDR_COLORGAMUT_BT709: + return JPEGR_NO_ERROR; + case ULTRAHDR_COLORGAMUT_P3: + conversionFn = yuv709To601; + break; + case ULTRAHDR_COLORGAMUT_BT2100: + conversionFn = yuv709To2100; + break; + default: + // Should be impossible to hit after input validation + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + break; + case ULTRAHDR_COLORGAMUT_P3: + switch (dest_encoding) { + case ULTRAHDR_COLORGAMUT_BT709: + conversionFn = yuv601To709; + break; + case ULTRAHDR_COLORGAMUT_P3: + return JPEGR_NO_ERROR; + case ULTRAHDR_COLORGAMUT_BT2100: + conversionFn = yuv601To2100; + break; + default: + // Should be impossible to hit after input validation + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + break; + case 
ULTRAHDR_COLORGAMUT_BT2100: + switch (dest_encoding) { + case ULTRAHDR_COLORGAMUT_BT709: + conversionFn = yuv2100To709; + break; + case ULTRAHDR_COLORGAMUT_P3: + conversionFn = yuv2100To601; + break; + case ULTRAHDR_COLORGAMUT_BT2100: + return JPEGR_NO_ERROR; + default: + // Should be impossible to hit after input validation + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + break; + default: + // Should be impossible to hit after input validation + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + + if (conversionFn == nullptr) { + // Should be impossible to hit after input validation + return ERROR_JPEGR_INVALID_COLORGAMUT; + } + + for (size_t y = 0; y < image->height / 2; ++y) { + for (size_t x = 0; x < image->width / 2; ++x) { + transformYuv420(image, x, y, conversionFn); + } + } + + return JPEGR_NO_ERROR; +} + +} // namespace ultrahdr diff --git a/lib/src/jpegrutils.cpp b/lib/src/jpegrutils.cpp new file mode 100644 index 0000000..77cb26b --- /dev/null +++ b/lib/src/jpegrutils.cpp @@ -0,0 +1,583 @@ +/* + * Copyright 2022 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/jpegr.h" +#include "ultrahdr/jpegrutils.h" + +#include "image_io/xml/xml_reader.h" +#include "image_io/xml/xml_writer.h" +#include "image_io/base/message_handler.h" +#include "image_io/xml/xml_element_rules.h" +#include "image_io/xml/xml_handler.h" +#include "image_io/xml/xml_rule.h" + +using namespace photos_editing_formats::image_io; +using namespace std; + +namespace ultrahdr { +/* + * Helper function used for generating XMP metadata. + * + * @param prefix The prefix part of the name. + * @param suffix The suffix part of the name. + * @return A name of the form "prefix:suffix". + */ +static inline string Name(const string& prefix, const string& suffix) { + std::stringstream ss; + ss << prefix << ":" << suffix; + return ss.str(); +} + +DataStruct::DataStruct(int s) { + data = malloc(s); + length = s; + memset(data, 0, s); + writePos = 0; +} + +DataStruct::~DataStruct() { + if (data != nullptr) { + free(data); + } +} + +void* DataStruct::getData() { return data; } + +int DataStruct::getLength() { return length; } + +int DataStruct::getBytesWritten() { return writePos; } + +bool DataStruct::write8(uint8_t value) { + uint8_t v = value; + return write(&v, 1); +} + +bool DataStruct::write16(uint16_t value) { + uint16_t v = value; + return write(&v, 2); +} +bool DataStruct::write32(uint32_t value) { + uint32_t v = value; + return write(&v, 4); +} + +bool DataStruct::write(const void* src, int size) { + if (writePos + size > length) { + ALOGE("Writing out of boundary: write position: %d, size: %d, capacity: %d", writePos, size, + length); + return false; + } + memcpy((uint8_t*)data + writePos, src, size); + writePos += size; + return true; +} + +/* + * Helper function used for writing data to destination. 
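+ * On success the bytes are copied into destination->data at byte offset `position` and
+ * `position` is advanced by `length`; if the write would exceed destination->maxLength, the
+ * function returns ERROR_JPEGR_BUFFER_TOO_SMALL and writes nothing.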
+ */ +status_t Write(jr_compressed_ptr destination, const void* source, size_t length, int& position) { + if (position + length > destination->maxLength) { + return ERROR_JPEGR_BUFFER_TOO_SMALL; + } + + memcpy((uint8_t*)destination->data + sizeof(uint8_t) * position, source, length); + position += length; + return JPEGR_NO_ERROR; +} + +// Extremely simple XML Handler - just searches for interesting elements +class XMPXmlHandler : public XmlHandler { + public: + XMPXmlHandler() : XmlHandler() { + state = NotStrarted; + versionFound = false; + minContentBoostFound = false; + maxContentBoostFound = false; + gammaFound = false; + offsetSdrFound = false; + offsetHdrFound = false; + hdrCapacityMinFound = false; + hdrCapacityMaxFound = false; + baseRenditionIsHdrFound = false; + } + + enum ParseState { NotStrarted, Started, Done }; + + virtual DataMatchResult StartElement(const XmlTokenContext& context) { + string val; + if (context.BuildTokenValue(&val)) { + if (!val.compare(containerName)) { + state = Started; + } else { + if (state != Done) { + state = NotStrarted; + } + } + } + return context.GetResult(); + } + + virtual DataMatchResult FinishElement(const XmlTokenContext& context) { + if (state == Started) { + state = Done; + lastAttributeName = ""; + } + return context.GetResult(); + } + + virtual DataMatchResult AttributeName(const XmlTokenContext& context) { + string val; + if (state == Started) { + if (context.BuildTokenValue(&val)) { + if (!val.compare(versionAttrName)) { + lastAttributeName = versionAttrName; + } else if (!val.compare(maxContentBoostAttrName)) { + lastAttributeName = maxContentBoostAttrName; + } else if (!val.compare(minContentBoostAttrName)) { + lastAttributeName = minContentBoostAttrName; + } else if (!val.compare(gammaAttrName)) { + lastAttributeName = gammaAttrName; + } else if (!val.compare(offsetSdrAttrName)) { + lastAttributeName = offsetSdrAttrName; + } else if (!val.compare(offsetHdrAttrName)) { + lastAttributeName = offsetHdrAttrName; + } else if (!val.compare(hdrCapacityMinAttrName)) { + lastAttributeName = hdrCapacityMinAttrName; + } else if (!val.compare(hdrCapacityMaxAttrName)) { + lastAttributeName = hdrCapacityMaxAttrName; + } else if (!val.compare(baseRenditionIsHdrAttrName)) { + lastAttributeName = baseRenditionIsHdrAttrName; + } else { + lastAttributeName = ""; + } + } + } + return context.GetResult(); + } + + virtual DataMatchResult AttributeValue(const XmlTokenContext& context) { + string val; + if (state == Started) { + if (context.BuildTokenValue(&val, true)) { + if (!lastAttributeName.compare(versionAttrName)) { + versionStr = val; + versionFound = true; + } else if (!lastAttributeName.compare(maxContentBoostAttrName)) { + maxContentBoostStr = val; + maxContentBoostFound = true; + } else if (!lastAttributeName.compare(minContentBoostAttrName)) { + minContentBoostStr = val; + minContentBoostFound = true; + } else if (!lastAttributeName.compare(gammaAttrName)) { + gammaStr = val; + gammaFound = true; + } else if (!lastAttributeName.compare(offsetSdrAttrName)) { + offsetSdrStr = val; + offsetSdrFound = true; + } else if (!lastAttributeName.compare(offsetHdrAttrName)) { + offsetHdrStr = val; + offsetHdrFound = true; + } else if (!lastAttributeName.compare(hdrCapacityMinAttrName)) { + hdrCapacityMinStr = val; + hdrCapacityMinFound = true; + } else if (!lastAttributeName.compare(hdrCapacityMaxAttrName)) { + hdrCapacityMaxStr = val; + hdrCapacityMaxFound = true; + } else if (!lastAttributeName.compare(baseRenditionIsHdrAttrName)) { + 
baseRenditionIsHdrStr = val; + baseRenditionIsHdrFound = true; + } + } + } + return context.GetResult(); + } + + bool getVersion(string* version, bool* present) { + if (state == Done) { + *version = versionStr; + *present = versionFound; + return true; + } else { + return false; + } + } + + bool getMaxContentBoost(float* max_content_boost, bool* present) { + if (state == Done) { + *present = maxContentBoostFound; + stringstream ss(maxContentBoostStr); + float val; + if (ss >> val) { + *max_content_boost = exp2(val); + return true; + } else { + return false; + } + } else { + return false; + } + } + + bool getMinContentBoost(float* min_content_boost, bool* present) { + if (state == Done) { + *present = minContentBoostFound; + stringstream ss(minContentBoostStr); + float val; + if (ss >> val) { + *min_content_boost = exp2(val); + return true; + } else { + return false; + } + } else { + return false; + } + } + + bool getGamma(float* gamma, bool* present) { + if (state == Done) { + *present = gammaFound; + stringstream ss(gammaStr); + float val; + if (ss >> val) { + *gamma = val; + return true; + } else { + return false; + } + } else { + return false; + } + } + + bool getOffsetSdr(float* offset_sdr, bool* present) { + if (state == Done) { + *present = offsetSdrFound; + stringstream ss(offsetSdrStr); + float val; + if (ss >> val) { + *offset_sdr = val; + return true; + } else { + return false; + } + } else { + return false; + } + } + + bool getOffsetHdr(float* offset_hdr, bool* present) { + if (state == Done) { + *present = offsetHdrFound; + stringstream ss(offsetHdrStr); + float val; + if (ss >> val) { + *offset_hdr = val; + return true; + } else { + return false; + } + } else { + return false; + } + } + + bool getHdrCapacityMin(float* hdr_capacity_min, bool* present) { + if (state == Done) { + *present = hdrCapacityMinFound; + stringstream ss(hdrCapacityMinStr); + float val; + if (ss >> val) { + *hdr_capacity_min = exp2(val); + return true; + } else { + return false; + } + } else { + return false; + } + } + + bool getHdrCapacityMax(float* hdr_capacity_max, bool* present) { + if (state == Done) { + *present = hdrCapacityMaxFound; + stringstream ss(hdrCapacityMaxStr); + float val; + if (ss >> val) { + *hdr_capacity_max = exp2(val); + return true; + } else { + return false; + } + } else { + return false; + } + } + + bool getBaseRenditionIsHdr(bool* base_rendition_is_hdr, bool* present) { + if (state == Done) { + *present = baseRenditionIsHdrFound; + if (!baseRenditionIsHdrStr.compare("False")) { + *base_rendition_is_hdr = false; + return true; + } else if (!baseRenditionIsHdrStr.compare("True")) { + *base_rendition_is_hdr = true; + return true; + } else { + return false; + } + } else { + return false; + } + } + + private: + static const string containerName; + + static const string versionAttrName; + string versionStr; + bool versionFound; + static const string maxContentBoostAttrName; + string maxContentBoostStr; + bool maxContentBoostFound; + static const string minContentBoostAttrName; + string minContentBoostStr; + bool minContentBoostFound; + static const string gammaAttrName; + string gammaStr; + bool gammaFound; + static const string offsetSdrAttrName; + string offsetSdrStr; + bool offsetSdrFound; + static const string offsetHdrAttrName; + string offsetHdrStr; + bool offsetHdrFound; + static const string hdrCapacityMinAttrName; + string hdrCapacityMinStr; + bool hdrCapacityMinFound; + static const string hdrCapacityMaxAttrName; + string hdrCapacityMaxStr; + bool hdrCapacityMaxFound; + 
static const string baseRenditionIsHdrAttrName; + string baseRenditionIsHdrStr; + bool baseRenditionIsHdrFound; + + string lastAttributeName; + ParseState state; +}; + +// GContainer XMP constants - URI and namespace prefix +const string kContainerUri = "http://ns.google.com/photos/1.0/container/"; +const string kContainerPrefix = "Container"; + +// GContainer XMP constants - element and attribute names +const string kConDirectory = Name(kContainerPrefix, "Directory"); +const string kConItem = Name(kContainerPrefix, "Item"); + +// GContainer XMP constants - names for XMP handlers +const string XMPXmlHandler::containerName = "rdf:Description"; +// Item XMP constants - URI and namespace prefix +const string kItemUri = "http://ns.google.com/photos/1.0/container/item/"; +const string kItemPrefix = "Item"; + +// Item XMP constants - element and attribute names +const string kItemLength = Name(kItemPrefix, "Length"); +const string kItemMime = Name(kItemPrefix, "Mime"); +const string kItemSemantic = Name(kItemPrefix, "Semantic"); + +// Item XMP constants - element and attribute values +const string kSemanticPrimary = "Primary"; +const string kSemanticGainMap = "GainMap"; +const string kMimeImageJpeg = "image/jpeg"; + +// GainMap XMP constants - URI and namespace prefix +const string kGainMapUri = "http://ns.adobe.com/hdr-gain-map/1.0/"; +const string kGainMapPrefix = "hdrgm"; + +// GainMap XMP constants - element and attribute names +const string kMapVersion = Name(kGainMapPrefix, "Version"); +const string kMapGainMapMin = Name(kGainMapPrefix, "GainMapMin"); +const string kMapGainMapMax = Name(kGainMapPrefix, "GainMapMax"); +const string kMapGamma = Name(kGainMapPrefix, "Gamma"); +const string kMapOffsetSdr = Name(kGainMapPrefix, "OffsetSDR"); +const string kMapOffsetHdr = Name(kGainMapPrefix, "OffsetHDR"); +const string kMapHDRCapacityMin = Name(kGainMapPrefix, "HDRCapacityMin"); +const string kMapHDRCapacityMax = Name(kGainMapPrefix, "HDRCapacityMax"); +const string kMapBaseRenditionIsHDR = Name(kGainMapPrefix, "BaseRenditionIsHDR"); + +// GainMap XMP constants - names for XMP handlers +const string XMPXmlHandler::versionAttrName = kMapVersion; +const string XMPXmlHandler::minContentBoostAttrName = kMapGainMapMin; +const string XMPXmlHandler::maxContentBoostAttrName = kMapGainMapMax; +const string XMPXmlHandler::gammaAttrName = kMapGamma; +const string XMPXmlHandler::offsetSdrAttrName = kMapOffsetSdr; +const string XMPXmlHandler::offsetHdrAttrName = kMapOffsetHdr; +const string XMPXmlHandler::hdrCapacityMinAttrName = kMapHDRCapacityMin; +const string XMPXmlHandler::hdrCapacityMaxAttrName = kMapHDRCapacityMax; +const string XMPXmlHandler::baseRenditionIsHdrAttrName = kMapBaseRenditionIsHDR; + +bool getMetadataFromXMP(uint8_t* xmp_data, size_t xmp_size, ultrahdr_metadata_struct* metadata) { + string nameSpace = "http://ns.adobe.com/xap/1.0/\0"; + + if (xmp_size < nameSpace.size() + 2) { + // Data too short + return false; + } + + if (strncmp(reinterpret_cast(xmp_data), nameSpace.c_str(), nameSpace.size())) { + // Not correct namespace + return false; + } + + // Position the pointers to the start of XMP XML portion + xmp_data += nameSpace.size() + 1; + xmp_size -= nameSpace.size() + 1; + XMPXmlHandler handler; + + // We need to remove tail data until the closing tag. Otherwise parser will throw an error. 
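+  // (Trailing bytes after the last '>', such as padding that some XMP writers append, are
+  //  dropped here so they never reach the XML reader below.)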
+ while (xmp_data[xmp_size - 1] != '>' && xmp_size > 1) { + xmp_size--; + } + + string str(reinterpret_cast(xmp_data), xmp_size); + MessageHandler msg_handler; + unique_ptr rule(new XmlElementRule); + XmlReader reader(&handler, &msg_handler); + reader.StartParse(std::move(rule)); + reader.Parse(str); + reader.FinishParse(); + if (reader.HasErrors()) { + // Parse error + return false; + } + + // Apply default values to any not-present fields, except for Version, + // maxContentBoost, and hdrCapacityMax, which are required. Return false if + // we encounter a present field that couldn't be parsed, since this + // indicates it is invalid (eg. string where there should be a float). + bool present = false; + if (!handler.getVersion(&metadata->version, &present) || !present) { + return false; + } + if (!handler.getMaxContentBoost(&metadata->maxContentBoost, &present) || !present) { + return false; + } + if (!handler.getHdrCapacityMax(&metadata->hdrCapacityMax, &present) || !present) { + return false; + } + if (!handler.getMinContentBoost(&metadata->minContentBoost, &present)) { + if (present) return false; + metadata->minContentBoost = 1.0f; + } + if (!handler.getGamma(&metadata->gamma, &present)) { + if (present) return false; + metadata->gamma = 1.0f; + } + if (!handler.getOffsetSdr(&metadata->offsetSdr, &present)) { + if (present) return false; + metadata->offsetSdr = 1.0f / 64.0f; + } + if (!handler.getOffsetHdr(&metadata->offsetHdr, &present)) { + if (present) return false; + metadata->offsetHdr = 1.0f / 64.0f; + } + if (!handler.getHdrCapacityMin(&metadata->hdrCapacityMin, &present)) { + if (present) return false; + metadata->hdrCapacityMin = 1.0f; + } + + bool base_rendition_is_hdr; + if (!handler.getBaseRenditionIsHdr(&base_rendition_is_hdr, &present)) { + if (present) return false; + base_rendition_is_hdr = false; + } + if (base_rendition_is_hdr) { + ALOGE("Base rendition of HDR is not supported!"); + return false; + } + + return true; +} + +string generateXmpForPrimaryImage(int secondary_image_length, ultrahdr_metadata_struct& metadata) { + const vector kConDirSeq({kConDirectory, string("rdf:Seq")}); + const vector kLiItem({string("rdf:li"), kConItem}); + + std::stringstream ss; + photos_editing_formats::image_io::XmlWriter writer(ss); + writer.StartWritingElement("x:xmpmeta"); + writer.WriteXmlns("x", "adobe:ns:meta/"); + writer.WriteAttributeNameAndValue("x:xmptk", "Adobe XMP Core 5.1.2"); + writer.StartWritingElement("rdf:RDF"); + writer.WriteXmlns("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"); + writer.StartWritingElement("rdf:Description"); + writer.WriteXmlns(kContainerPrefix, kContainerUri); + writer.WriteXmlns(kItemPrefix, kItemUri); + writer.WriteXmlns(kGainMapPrefix, kGainMapUri); + writer.WriteAttributeNameAndValue(kMapVersion, metadata.version); + + writer.StartWritingElements(kConDirSeq); + + size_t item_depth = writer.StartWritingElement("rdf:li"); + writer.WriteAttributeNameAndValue("rdf:parseType", "Resource"); + writer.StartWritingElement(kConItem); + writer.WriteAttributeNameAndValue(kItemSemantic, kSemanticPrimary); + writer.WriteAttributeNameAndValue(kItemMime, kMimeImageJpeg); + writer.FinishWritingElementsToDepth(item_depth); + + writer.StartWritingElement("rdf:li"); + writer.WriteAttributeNameAndValue("rdf:parseType", "Resource"); + writer.StartWritingElement(kConItem); + writer.WriteAttributeNameAndValue(kItemSemantic, kSemanticGainMap); + writer.WriteAttributeNameAndValue(kItemMime, kMimeImageJpeg); + writer.WriteAttributeNameAndValue(kItemLength, 
secondary_image_length); + + writer.FinishWriting(); + + return ss.str(); +} + +string generateXmpForSecondaryImage(ultrahdr_metadata_struct& metadata) { + const vector kConDirSeq({kConDirectory, string("rdf:Seq")}); + + std::stringstream ss; + photos_editing_formats::image_io::XmlWriter writer(ss); + writer.StartWritingElement("x:xmpmeta"); + writer.WriteXmlns("x", "adobe:ns:meta/"); + writer.WriteAttributeNameAndValue("x:xmptk", "Adobe XMP Core 5.1.2"); + writer.StartWritingElement("rdf:RDF"); + writer.WriteXmlns("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#"); + writer.StartWritingElement("rdf:Description"); + writer.WriteXmlns(kGainMapPrefix, kGainMapUri); + writer.WriteAttributeNameAndValue(kMapVersion, metadata.version); + writer.WriteAttributeNameAndValue(kMapGainMapMin, log2(metadata.minContentBoost)); + writer.WriteAttributeNameAndValue(kMapGainMapMax, log2(metadata.maxContentBoost)); + writer.WriteAttributeNameAndValue(kMapGamma, metadata.gamma); + writer.WriteAttributeNameAndValue(kMapOffsetSdr, metadata.offsetSdr); + writer.WriteAttributeNameAndValue(kMapOffsetHdr, metadata.offsetHdr); + writer.WriteAttributeNameAndValue(kMapHDRCapacityMin, log2(metadata.hdrCapacityMin)); + writer.WriteAttributeNameAndValue(kMapHDRCapacityMax, log2(metadata.hdrCapacityMax)); + writer.WriteAttributeNameAndValue(kMapBaseRenditionIsHDR, "False"); + writer.FinishWriting(); + + return ss.str(); +} + +} // namespace ultrahdr diff --git a/lib/src/multipictureformat.cpp b/lib/src/multipictureformat.cpp new file mode 100644 index 0000000..59efc66 --- /dev/null +++ b/lib/src/multipictureformat.cpp @@ -0,0 +1,92 @@ +/* + * Copyright 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ultrahdr/multipictureformat.h" + +namespace ultrahdr { +size_t calculateMpfSize() { + return sizeof(kMpfSig) + // Signature + kMpEndianSize + // Endianness + sizeof(uint32_t) + // Index IFD Offset + sizeof(uint16_t) + // Tag count + kTagSerializedCount * kTagSize + // 3 tags at 12 bytes each + sizeof(uint32_t) + // Attribute IFD offset + kNumPictures * kMPEntrySize; // MP Entries for each image +} + +std::shared_ptr generateMpf(int primary_image_size, int primary_image_offset, + int secondary_image_size, int secondary_image_offset) { + size_t mpf_size = calculateMpfSize(); + std::shared_ptr dataStruct = std::make_shared(mpf_size); + + dataStruct->write(static_cast(kMpfSig), sizeof(kMpfSig)); +#if USE_BIG_ENDIAN_IN_MPF + dataStruct->write(static_cast(kMpBigEndian), kMpEndianSize); +#else + dataStruct->write(static_cast(kMpLittleEndian), kMpEndianSize); +#endif + + // Set the Index IFD offset be the position after the endianness value and this offset. + constexpr uint32_t indexIfdOffset = static_cast(kMpEndianSize + sizeof(kMpfSig)); + dataStruct->write32(Endian_SwapBE32(indexIfdOffset)); + + // We will write 3 tags (version, number of images, MP entries). + dataStruct->write16(Endian_SwapBE16(kTagSerializedCount)); + + // Write the version tag. 
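+  // (Each of the three IFD entries below uses the standard TIFF/EXIF 12-byte layout: 2-byte
+  //  tag id, 2-byte type, 4-byte count, then a 4-byte value or offset. That is where the
+  //  kTagSerializedCount * kTagSize term in calculateMpfSize() comes from.)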
+ dataStruct->write16(Endian_SwapBE16(kVersionTag)); + dataStruct->write16(Endian_SwapBE16(kVersionType)); + dataStruct->write32(Endian_SwapBE32(kVersionCount)); + dataStruct->write(kVersionExpected, kVersionSize); + + // Write the number of images. + dataStruct->write16(Endian_SwapBE16(kNumberOfImagesTag)); + dataStruct->write16(Endian_SwapBE16(kNumberOfImagesType)); + dataStruct->write32(Endian_SwapBE32(kNumberOfImagesCount)); + dataStruct->write32(Endian_SwapBE32(kNumPictures)); + + // Write the MP entries. + dataStruct->write16(Endian_SwapBE16(kMPEntryTag)); + dataStruct->write16(Endian_SwapBE16(kMPEntryType)); + dataStruct->write32(Endian_SwapBE32(kMPEntrySize * kNumPictures)); + const uint32_t mpEntryOffset = + static_cast(dataStruct->getBytesWritten() - // The bytes written so far + sizeof(kMpfSig) + // Excluding the MPF signature + sizeof(uint32_t) + // The 4 bytes for this offset + sizeof(uint32_t)); // The 4 bytes for the attribute IFD offset. + dataStruct->write32(Endian_SwapBE32(mpEntryOffset)); + + // Write the attribute IFD offset (zero because we don't write it). + dataStruct->write32(0); + + // Write the MP entries for primary image + dataStruct->write32(Endian_SwapBE32(kMPEntryAttributeFormatJpeg | kMPEntryAttributeTypePrimary)); + dataStruct->write32(Endian_SwapBE32(primary_image_size)); + dataStruct->write32(Endian_SwapBE32(primary_image_offset)); + dataStruct->write16(0); + dataStruct->write16(0); + + // Write the MP entries for secondary image + dataStruct->write32(Endian_SwapBE32(kMPEntryAttributeFormatJpeg)); + dataStruct->write32(Endian_SwapBE32(secondary_image_size)); + dataStruct->write32(Endian_SwapBE32(secondary_image_offset)); + dataStruct->write16(0); + dataStruct->write16(0); + + return dataStruct; +} + +} // namespace ultrahdr diff --git a/lib/ultrahdr.h b/lib/ultrahdr.h deleted file mode 100644 index fa69d57..0000000 --- a/lib/ultrahdr.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2023 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ULTRAHDR_ULTRAHDR_H -#define ULTRAHDR_ULTRAHDR_H - -#include - -namespace ultrahdr { -// Color gamuts for image data -typedef enum { - ULTRAHDR_COLORGAMUT_UNSPECIFIED = -1, - ULTRAHDR_COLORGAMUT_BT709, - ULTRAHDR_COLORGAMUT_P3, - ULTRAHDR_COLORGAMUT_BT2100, - ULTRAHDR_COLORGAMUT_MAX = ULTRAHDR_COLORGAMUT_BT2100, -} ultrahdr_color_gamut; - -// Transfer functions for image data -// TODO: TF LINEAR is deprecated, remove this enum and the code surrounding it. 
-typedef enum { - ULTRAHDR_TF_UNSPECIFIED = -1, - ULTRAHDR_TF_LINEAR = 0, - ULTRAHDR_TF_HLG = 1, - ULTRAHDR_TF_PQ = 2, - ULTRAHDR_TF_SRGB = 3, - ULTRAHDR_TF_MAX = ULTRAHDR_TF_SRGB, -} ultrahdr_transfer_function; - -// Target output formats for decoder -typedef enum { - ULTRAHDR_OUTPUT_UNSPECIFIED = -1, - ULTRAHDR_OUTPUT_SDR, // SDR in RGBA_8888 color format - ULTRAHDR_OUTPUT_HDR_LINEAR, // HDR in F16 color format (linear) - ULTRAHDR_OUTPUT_HDR_PQ, // HDR in RGBA_1010102 color format (PQ transfer function) - ULTRAHDR_OUTPUT_HDR_HLG, // HDR in RGBA_1010102 color format (HLG transfer function) - ULTRAHDR_OUTPUT_MAX = ULTRAHDR_OUTPUT_HDR_HLG, -} ultrahdr_output_format; - -/* - * Holds information for gain map related metadata. - * - * Not: all values stored in linear. This differs from the metadata encoding in XMP, where - * maxContentBoost (aka gainMapMax), minContentBoost (aka gainMapMin), hdrCapacityMin, and - * hdrCapacityMax are stored in log2 space. - */ -struct ultrahdr_metadata_struct { - // Ultra HDR format version - std::string version; - // Max Content Boost for the map - float maxContentBoost; - // Min Content Boost for the map - float minContentBoost; - // Gamma of the map data - float gamma; - // Offset for SDR data in map calculations - float offsetSdr; - // Offset for HDR data in map calculations - float offsetHdr; - // HDR capacity to apply the map at all - float hdrCapacityMin; - // HDR capacity to apply the map completely - float hdrCapacityMax; -}; -typedef struct ultrahdr_metadata_struct* ultrahdr_metadata_ptr; - -} // namespace ultrahdr - -#endif // ULTRAHDR_ULTRAHDR_H diff --git a/lib/ultrahdrcommon.h b/lib/ultrahdrcommon.h deleted file mode 100644 index ba3a3b8..0000000 --- a/lib/ultrahdrcommon.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2023 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ULTRAHDR_ULTRAHDRCOMMON_H -#define ULTRAHDR_ULTRAHDRCOMMON_H - -//#define LOG_NDEBUG 0 - -#ifdef __ANDROID__ -#include "log/log.h" -#else -#ifdef LOG_NDEBUG -#include - -#define ALOGD(...) \ - do { \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } while (0) -#define ALOGE(...) \ - do { \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } while (0) -#define ALOGI(...) \ - do { \ - fprintf(stdout, __VA_ARGS__); \ - fprintf(stdout, "\n"); \ - } while (0) -#define ALOGV(...) \ - do { \ - fprintf(stdout, __VA_ARGS__); \ - fprintf(stdout, "\n"); \ - } while (0) -#define ALOGW(...) \ - do { \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } while (0) -#else -#define ALOGD(...) ((void)0) -#define ALOGE(...) ((void)0) -#define ALOGI(...) ((void)0) -#define ALOGV(...) ((void)0) -#define ALOGW(...) 
((void)0) -#endif -#endif - -#define ALIGNM(x, m) ((((x) + ((m)-1)) / (m)) * (m)) - -#endif // ULTRAHDR_ULTRAHDRCOMMON_H diff --git a/tests/gainmapmath_test.cpp b/tests/gainmapmath_test.cpp index b474bd6..95fd159 100644 --- a/tests/gainmapmath_test.cpp +++ b/tests/gainmapmath_test.cpp @@ -17,7 +17,7 @@ #include #include -#include "gainmapmath.h" +#include "ultrahdr/gainmapmath.h" namespace ultrahdr { diff --git a/tests/icchelper_test.cpp b/tests/icchelper_test.cpp index 02ad27b..26f78a6 100644 --- a/tests/icchelper_test.cpp +++ b/tests/icchelper_test.cpp @@ -16,7 +16,7 @@ #include -#include "icc.h" +#include "ultrahdr/icc.h" namespace ultrahdr { diff --git a/tests/jpegdecoderhelper_test.cpp b/tests/jpegdecoderhelper_test.cpp index 3b8e88e..4e72be1 100644 --- a/tests/jpegdecoderhelper_test.cpp +++ b/tests/jpegdecoderhelper_test.cpp @@ -19,9 +19,9 @@ #include #include -#include "ultrahdrcommon.h" -#include "jpegdecoderhelper.h" -#include "icc.h" +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/jpegdecoderhelper.h" +#include "ultrahdr/icc.h" namespace ultrahdr { diff --git a/tests/jpegencoderhelper_test.cpp b/tests/jpegencoderhelper_test.cpp index 3463c26..23ab3f3 100644 --- a/tests/jpegencoderhelper_test.cpp +++ b/tests/jpegencoderhelper_test.cpp @@ -19,9 +19,9 @@ #include #include -#include "ultrahdrcommon.h" -#include "ultrahdr.h" -#include "jpegencoderhelper.h" +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/ultrahdr.h" +#include "ultrahdr/jpegencoderhelper.h" namespace ultrahdr { diff --git a/tests/jpegr_test.cpp b/tests/jpegr_test.cpp index bfbfcde..39c7af0 100644 --- a/tests/jpegr_test.cpp +++ b/tests/jpegr_test.cpp @@ -24,9 +24,9 @@ #include #include -#include "ultrahdrcommon.h" -#include "jpegr.h" -#include "jpegrutils.h" +#include "ultrahdr/ultrahdrcommon.h" +#include "ultrahdr/jpegr.h" +#include "ultrahdr/jpegrutils.h" //#define DUMP_OUTPUT -- cgit v1.2.3