Diffstat (limited to 'webrtc/modules/video_processing')
-rw-r--r--  webrtc/modules/video_processing/BUILD.gn | 65
-rw-r--r--  webrtc/modules/video_processing/OWNERS | 5
-rw-r--r--  webrtc/modules/video_processing/brightness_detection.cc (renamed from webrtc/modules/video_processing/main/source/brightness_detection.cc) | 65
-rw-r--r--  webrtc/modules/video_processing/brightness_detection.h (renamed from webrtc/modules/video_processing/main/source/brightness_detection.h) | 14
-rw-r--r--  webrtc/modules/video_processing/content_analysis.cc (renamed from webrtc/modules/video_processing/main/source/content_analysis.cc) | 85
-rw-r--r--  webrtc/modules/video_processing/content_analysis.h (renamed from webrtc/modules/video_processing/main/source/content_analysis.h) | 16
-rw-r--r--  webrtc/modules/video_processing/content_analysis_sse2.cc (renamed from webrtc/modules/video_processing/main/source/content_analysis_sse2.cc) | 181
-rw-r--r--  webrtc/modules/video_processing/deflickering.cc (renamed from webrtc/modules/video_processing/main/source/deflickering.cc) | 86
-rw-r--r--  webrtc/modules/video_processing/deflickering.h (renamed from webrtc/modules/video_processing/main/source/deflickering.h) | 27
-rw-r--r--  webrtc/modules/video_processing/frame_preprocessor.cc (renamed from webrtc/modules/video_processing/main/source/frame_preprocessor.cc) | 87
-rw-r--r--  webrtc/modules/video_processing/frame_preprocessor.h (renamed from webrtc/modules/video_processing/main/source/frame_preprocessor.h) | 45
-rw-r--r--  webrtc/modules/video_processing/include/video_processing.h | 102
-rw-r--r--  webrtc/modules/video_processing/include/video_processing_defines.h (renamed from webrtc/modules/video_processing/main/interface/video_processing_defines.h) | 28
-rw-r--r--  webrtc/modules/video_processing/main/interface/video_processing.h | 270
-rw-r--r--  webrtc/modules/video_processing/main/source/OWNERS | 5
-rw-r--r--  webrtc/modules/video_processing/main/source/brighten.cc | 45
-rw-r--r--  webrtc/modules/video_processing/main/source/brighten.h | 25
-rw-r--r--  webrtc/modules/video_processing/main/source/video_processing_impl.cc | 183
-rw-r--r--  webrtc/modules/video_processing/main/source/video_processing_impl.h | 75
-rw-r--r--  webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc | 121
-rw-r--r--  webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc | 100
-rw-r--r--  webrtc/modules/video_processing/spatial_resampler.cc (renamed from webrtc/modules/video_processing/main/source/spatial_resampler.cc) | 27
-rw-r--r--  webrtc/modules/video_processing/spatial_resampler.h (renamed from webrtc/modules/video_processing/main/source/spatial_resampler.h) | 25
-rw-r--r--  webrtc/modules/video_processing/test/brightness_detection_test.cc | 120
-rw-r--r--  webrtc/modules/video_processing/test/content_metrics_test.cc (renamed from webrtc/modules/video_processing/main/test/unit_test/content_metrics_test.cc) | 30
-rw-r--r--  webrtc/modules/video_processing/test/createTable.m (renamed from webrtc/modules/video_processing/main/test/unit_test/createTable.m) | 4
-rw-r--r--  webrtc/modules/video_processing/test/deflickering_test.cc | 98
-rw-r--r--  webrtc/modules/video_processing/test/denoiser_test.cc | 156
-rw-r--r--  webrtc/modules/video_processing/test/readYUV420file.m (renamed from webrtc/modules/video_processing/main/test/unit_test/readYUV420file.m) | 10
-rw-r--r--  webrtc/modules/video_processing/test/video_processing_unittest.cc (renamed from webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc) | 247
-rw-r--r--  webrtc/modules/video_processing/test/video_processing_unittest.h (renamed from webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h) | 20
-rw-r--r--  webrtc/modules/video_processing/test/writeYUV420file.m (renamed from webrtc/modules/video_processing/main/test/unit_test/writeYUV420file.m) | 4
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter.cc | 54
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter.h | 63
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter_c.cc | 194
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter_c.h | 46
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter_neon.cc | 283
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter_neon.h | 46
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter_sse2.cc | 280
-rw-r--r--  webrtc/modules/video_processing/util/denoiser_filter_sse2.h | 46
-rw-r--r--  webrtc/modules/video_processing/util/skin_detection.cc | 65
-rwxr-xr-x  webrtc/modules/video_processing/util/skin_detection.h | 28
-rw-r--r--  webrtc/modules/video_processing/video_decimator.cc (renamed from webrtc/modules/video_processing/main/source/video_decimator.cc) | 50
-rw-r--r--  webrtc/modules/video_processing/video_decimator.h (renamed from webrtc/modules/video_processing/main/source/video_decimator.h) | 14
-rw-r--r--  webrtc/modules/video_processing/video_denoiser.cc | 147
-rw-r--r--  webrtc/modules/video_processing/video_denoiser.h | 38
-rw-r--r--  webrtc/modules/video_processing/video_processing.gypi | 62
-rw-r--r--  webrtc/modules/video_processing/video_processing_impl.cc | 179
-rw-r--r--  webrtc/modules/video_processing/video_processing_impl.h | 55
49 files changed, 2655 insertions(+), 1366 deletions(-)
diff --git a/webrtc/modules/video_processing/BUILD.gn b/webrtc/modules/video_processing/BUILD.gn
index 00d2911eef..6d411edda1 100644
--- a/webrtc/modules/video_processing/BUILD.gn
+++ b/webrtc/modules/video_processing/BUILD.gn
@@ -6,30 +6,37 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
+import("//build/config/arm.gni")
import("../../build/webrtc.gni")
build_video_processing_sse2 = current_cpu == "x86" || current_cpu == "x64"
source_set("video_processing") {
sources = [
- "main/interface/video_processing.h",
- "main/interface/video_processing_defines.h",
- "main/source/brighten.cc",
- "main/source/brighten.h",
- "main/source/brightness_detection.cc",
- "main/source/brightness_detection.h",
- "main/source/content_analysis.cc",
- "main/source/content_analysis.h",
- "main/source/deflickering.cc",
- "main/source/deflickering.h",
- "main/source/frame_preprocessor.cc",
- "main/source/frame_preprocessor.h",
- "main/source/spatial_resampler.cc",
- "main/source/spatial_resampler.h",
- "main/source/video_decimator.cc",
- "main/source/video_decimator.h",
- "main/source/video_processing_impl.cc",
- "main/source/video_processing_impl.h",
+ "brightness_detection.cc",
+ "brightness_detection.h",
+ "content_analysis.cc",
+ "content_analysis.h",
+ "deflickering.cc",
+ "deflickering.h",
+ "frame_preprocessor.cc",
+ "frame_preprocessor.h",
+ "include/video_processing.h",
+ "include/video_processing_defines.h",
+ "spatial_resampler.cc",
+ "spatial_resampler.h",
+ "util/denoiser_filter.cc",
+ "util/denoiser_filter.h",
+ "util/denoiser_filter_c.cc",
+ "util/denoiser_filter_c.h",
+ "util/skin_detection.cc",
+ "util/skin_detection.h",
+ "video_decimator.cc",
+ "video_decimator.h",
+ "video_denoiser.cc",
+ "video_denoiser.h",
+ "video_processing_impl.cc",
+ "video_processing_impl.h",
]
deps = [
@@ -41,6 +48,9 @@ source_set("video_processing") {
if (build_video_processing_sse2) {
deps += [ ":video_processing_sse2" ]
}
+ if (rtc_build_with_neon) {
+ deps += [ ":video_processing_neon" ]
+ }
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
@@ -55,7 +65,9 @@ source_set("video_processing") {
if (build_video_processing_sse2) {
source_set("video_processing_sse2") {
sources = [
- "main/source/content_analysis_sse2.cc",
+ "content_analysis_sse2.cc",
+ "util/denoiser_filter_sse2.cc",
+ "util/denoiser_filter_sse2.h",
]
configs += [ "../..:common_config" ]
@@ -72,3 +84,18 @@ if (build_video_processing_sse2) {
}
}
}
+
+if (rtc_build_with_neon) {
+ source_set("video_processing_neon") {
+ sources = [
+ "util/denoiser_filter_neon.cc",
+ "util/denoiser_filter_neon.h",
+ ]
+ if (current_cpu != "arm64") {
+ configs -= [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+ configs += [ "../..:common_config" ]
+ public_configs = [ "../..:common_inherited_config" ]
+ }
+}
diff --git a/webrtc/modules/video_processing/OWNERS b/webrtc/modules/video_processing/OWNERS
index f452c9ed83..389d632dfd 100644
--- a/webrtc/modules/video_processing/OWNERS
+++ b/webrtc/modules/video_processing/OWNERS
@@ -1,4 +1,9 @@
stefan@webrtc.org
marpan@webrtc.org
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
+
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/video_processing/main/source/brightness_detection.cc b/webrtc/modules/video_processing/brightness_detection.cc
index bae225b3b0..7455cf9759 100644
--- a/webrtc/modules/video_processing/main/source/brightness_detection.cc
+++ b/webrtc/modules/video_processing/brightness_detection.cc
@@ -8,11 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/brightness_detection.h"
+#include "webrtc/modules/video_processing/brightness_detection.h"
#include <math.h>
+#include "webrtc/modules/video_processing/include/video_processing.h"
+
namespace webrtc {
VPMBrightnessDetection::VPMBrightnessDetection() {
@@ -28,14 +29,14 @@ void VPMBrightnessDetection::Reset() {
int32_t VPMBrightnessDetection::ProcessFrame(
const VideoFrame& frame,
- const VideoProcessingModule::FrameStats& stats) {
+ const VideoProcessing::FrameStats& stats) {
if (frame.IsZeroSize()) {
return VPM_PARAMETER_ERROR;
}
int width = frame.width();
int height = frame.height();
- if (!VideoProcessingModule::ValidFrameStats(stats)) {
+ if (!VideoProcessing::ValidFrameStats(stats)) {
return VPM_PARAMETER_ERROR;
}
@@ -62,11 +63,11 @@ int32_t VPMBrightnessDetection::ProcessFrame(
// Standard deviation of Y
const uint8_t* buffer = frame.buffer(kYPlane);
float std_y = 0;
- for (int h = 0; h < height; h += (1 << stats.subSamplHeight)) {
- int row = h*width;
- for (int w = 0; w < width; w += (1 << stats.subSamplWidth)) {
- std_y += (buffer[w + row] - stats.mean) * (buffer[w + row] -
- stats.mean);
+ for (int h = 0; h < height; h += (1 << stats.sub_sampling_factor)) {
+ int row = h * width;
+ for (int w = 0; w < width; w += (1 << stats.sub_sampling_factor)) {
+ std_y +=
+ (buffer[w + row] - stats.mean) * (buffer[w + row] - stats.mean);
}
}
std_y = sqrt(std_y / stats.num_pixels);
@@ -81,37 +82,39 @@ int32_t VPMBrightnessDetection::ProcessFrame(
float posPerc95 = stats.num_pixels * 0.95f;
for (uint32_t i = 0; i < 256; i++) {
sum += stats.hist[i];
- if (sum < pos_perc05) perc05 = i; // 5th perc.
- if (sum < pos_median) median_y = i; // 50th perc.
+ if (sum < pos_perc05)
+ perc05 = i; // 5th perc.
+ if (sum < pos_median)
+ median_y = i; // 50th perc.
if (sum < posPerc95)
- perc95 = i; // 95th perc.
+ perc95 = i; // 95th perc.
else
break;
}
- // Check if image is too dark
- if ((std_y < 55) && (perc05 < 50)) {
- if (median_y < 60 || stats.mean < 80 || perc95 < 130 ||
- prop_low > 0.20) {
- frame_cnt_dark_++;
- } else {
- frame_cnt_dark_ = 0;
- }
+ // Check if image is too dark
+ if ((std_y < 55) && (perc05 < 50)) {
+ if (median_y < 60 || stats.mean < 80 || perc95 < 130 ||
+ prop_low > 0.20) {
+ frame_cnt_dark_++;
} else {
frame_cnt_dark_ = 0;
}
+ } else {
+ frame_cnt_dark_ = 0;
+ }
- // Check if image is too bright
- if ((std_y < 52) && (perc95 > 200) && (median_y > 160)) {
- if (median_y > 185 || stats.mean > 185 || perc05 > 140 ||
- prop_high > 0.25) {
- frame_cnt_bright_++;
- } else {
- frame_cnt_bright_ = 0;
- }
+ // Check if image is too bright
+ if ((std_y < 52) && (perc95 > 200) && (median_y > 160)) {
+ if (median_y > 185 || stats.mean > 185 || perc05 > 140 ||
+ prop_high > 0.25) {
+ frame_cnt_bright_++;
} else {
frame_cnt_bright_ = 0;
}
+ } else {
+ frame_cnt_bright_ = 0;
+ }
} else {
frame_cnt_dark_ = 0;
frame_cnt_bright_ = 0;
@@ -122,11 +125,11 @@ int32_t VPMBrightnessDetection::ProcessFrame(
}
if (frame_cnt_dark_ > frame_cnt_alarm) {
- return VideoProcessingModule::kDarkWarning;
+ return VideoProcessing::kDarkWarning;
} else if (frame_cnt_bright_ > frame_cnt_alarm) {
- return VideoProcessingModule::kBrightWarning;
+ return VideoProcessing::kBrightWarning;
} else {
- return VideoProcessingModule::kNoWarning;
+ return VideoProcessing::kNoWarning;
}
}
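
The percentile scan above makes a single pass over the 256-bin luminance histogram, keeping the last bin whose cumulative count is still below each percentile position. The same idea as a standalone helper (hypothetical name, not part of the patch):

  #include <cstdint>

  // Returns the last histogram bin whose cumulative count stays below
  // fraction * num_pixels -- the same rule the perc05/median_y/perc95
  // scan above applies.
  int HistogramPercentile(const uint32_t hist[256], uint32_t num_pixels,
                          float fraction) {
    const float target = num_pixels * fraction;
    uint32_t sum = 0;
    int bin = 0;
    for (int i = 0; i < 256; ++i) {
      sum += hist[i];
      if (sum < target)
        bin = i;  // Last bin strictly below the target position.
      else
        break;
    }
    return bin;
  }
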
diff --git a/webrtc/modules/video_processing/main/source/brightness_detection.h b/webrtc/modules/video_processing/brightness_detection.h
index 48532b4a20..78a7ac5e0b 100644
--- a/webrtc/modules/video_processing/main/source/brightness_detection.h
+++ b/webrtc/modules/video_processing/brightness_detection.h
@@ -8,12 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-/*
- * brightness_detection.h
- */
-#ifndef MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTNESS_DETECTION_H
-#define MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTNESS_DETECTION_H
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
+
+#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -25,7 +23,7 @@ class VPMBrightnessDetection {
void Reset();
int32_t ProcessFrame(const VideoFrame& frame,
- const VideoProcessingModule::FrameStats& stats);
+ const VideoProcessing::FrameStats& stats);
private:
uint32_t frame_cnt_bright_;
@@ -34,4 +32,4 @@ class VPMBrightnessDetection {
} // namespace webrtc
-#endif // MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTNESS_DETECTION_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
diff --git a/webrtc/modules/video_processing/main/source/content_analysis.cc b/webrtc/modules/video_processing/content_analysis.cc
index d29db27408..54c04da466 100644
--- a/webrtc/modules/video_processing/main/source/content_analysis.cc
+++ b/webrtc/modules/video_processing/content_analysis.cc
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
#include <math.h>
#include <stdlib.h>
@@ -72,7 +72,7 @@ VideoContentMetrics* VPMContentAnalysis::ComputeContentMetrics(
// Saving current frame as previous one: Y only.
memcpy(prev_frame_, orig_frame_, width_ * height_);
- first_frame_ = false;
+ first_frame_ = false;
ca_Init_ = true;
return ContentMetrics();
@@ -85,7 +85,7 @@ int32_t VPMContentAnalysis::Release() {
}
if (prev_frame_ != NULL) {
- delete [] prev_frame_;
+ delete[] prev_frame_;
prev_frame_ = NULL;
}
@@ -106,11 +106,11 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
skip_num_ = 1;
// use skipNum = 2 for 4CIF, WHD
- if ( (height_ >= 576) && (width_ >= 704) ) {
+ if ((height_ >= 576) && (width_ >= 704)) {
skip_num_ = 2;
}
// use skipNum = 4 for FULL_HD images
- if ( (height_ >= 1080) && (width_ >= 1920) ) {
+ if ((height_ >= 1080) && (width_ >= 1920)) {
skip_num_ = 4;
}
@@ -119,7 +119,7 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
}
if (prev_frame_ != NULL) {
- delete [] prev_frame_;
+ delete[] prev_frame_;
}
// Spatial Metrics don't work on a border of 8. Minimum processing
@@ -135,12 +135,12 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
}
prev_frame_ = new uint8_t[width_ * height_]; // Y only.
- if (prev_frame_ == NULL) return VPM_MEMORY;
+ if (prev_frame_ == NULL)
+ return VPM_MEMORY;
return VPM_OK;
}
-
// Compute motion metrics: magnitude over non-zero motion vectors,
// and size of zero cluster
int32_t VPMContentAnalysis::ComputeMotionMetrics() {
@@ -163,36 +163,41 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_C() {
uint64_t pixelSqSum = 0;
uint32_t num_pixels = 0; // Counter for # of pixels.
- const int width_end = ((width_ - 2*border_) & -16) + border_;
+ const int width_end = ((width_ - 2 * border_) & -16) + border_;
for (int i = border_; i < sizei - border_; i += skip_num_) {
for (int j = border_; j < width_end; j++) {
num_pixels += 1;
- int ssn = i * sizej + j;
+ int ssn = i * sizej + j;
- uint8_t currPixel = orig_frame_[ssn];
- uint8_t prevPixel = prev_frame_[ssn];
+ uint8_t currPixel = orig_frame_[ssn];
+ uint8_t prevPixel = prev_frame_[ssn];
- tempDiffSum += (uint32_t)abs((int16_t)(currPixel - prevPixel));
- pixelSum += (uint32_t) currPixel;
- pixelSqSum += (uint64_t) (currPixel * currPixel);
+ tempDiffSum +=
+ static_cast<uint32_t>(abs((int16_t)(currPixel - prevPixel)));
+ pixelSum += static_cast<uint32_t>(currPixel);
+ pixelSqSum += static_cast<uint64_t>(currPixel * currPixel);
}
}
// Default.
motion_magnitude_ = 0.0f;
- if (tempDiffSum == 0) return VPM_OK;
+ if (tempDiffSum == 0)
+ return VPM_OK;
// Normalize over all pixels.
- float const tempDiffAvg = (float)tempDiffSum / (float)(num_pixels);
- float const pixelSumAvg = (float)pixelSum / (float)(num_pixels);
- float const pixelSqSumAvg = (float)pixelSqSum / (float)(num_pixels);
+ float const tempDiffAvg =
+ static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
+ float const pixelSumAvg =
+ static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
+ float const pixelSqSumAvg =
+ static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
if (contrast > 0.0) {
contrast = sqrt(contrast);
- motion_magnitude_ = tempDiffAvg/contrast;
+ motion_magnitude_ = tempDiffAvg / contrast;
}
return VPM_OK;
}
@@ -216,39 +221,40 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
uint32_t spatialErrHSum = 0;
// make sure work section is a multiple of 16
- const int width_end = ((sizej - 2*border_) & -16) + border_;
+ const int width_end = ((sizej - 2 * border_) & -16) + border_;
for (int i = border_; i < sizei - border_; i += skip_num_) {
for (int j = border_; j < width_end; j++) {
- int ssn1= i * sizej + j;
- int ssn2 = (i + 1) * sizej + j; // bottom
- int ssn3 = (i - 1) * sizej + j; // top
- int ssn4 = i * sizej + j + 1; // right
- int ssn5 = i * sizej + j - 1; // left
+ int ssn1 = i * sizej + j;
+ int ssn2 = (i + 1) * sizej + j; // bottom
+ int ssn3 = (i - 1) * sizej + j; // top
+ int ssn4 = i * sizej + j + 1; // right
+ int ssn5 = i * sizej + j - 1; // left
- uint16_t refPixel1 = orig_frame_[ssn1] << 1;
- uint16_t refPixel2 = orig_frame_[ssn1] << 2;
+ uint16_t refPixel1 = orig_frame_[ssn1] << 1;
+ uint16_t refPixel2 = orig_frame_[ssn1] << 2;
uint8_t bottPixel = orig_frame_[ssn2];
uint8_t topPixel = orig_frame_[ssn3];
uint8_t rightPixel = orig_frame_[ssn4];
uint8_t leftPixel = orig_frame_[ssn5];
- spatialErrSum += (uint32_t) abs((int16_t)(refPixel2
- - (uint16_t)(bottPixel + topPixel + leftPixel + rightPixel)));
- spatialErrVSum += (uint32_t) abs((int16_t)(refPixel1
- - (uint16_t)(bottPixel + topPixel)));
- spatialErrHSum += (uint32_t) abs((int16_t)(refPixel1
- - (uint16_t)(leftPixel + rightPixel)));
+ spatialErrSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+ refPixel2 - static_cast<uint16_t>(bottPixel + topPixel + leftPixel +
+ rightPixel))));
+ spatialErrVSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+ refPixel1 - static_cast<uint16_t>(bottPixel + topPixel))));
+ spatialErrHSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+ refPixel1 - static_cast<uint16_t>(leftPixel + rightPixel))));
pixelMSA += orig_frame_[ssn1];
}
}
// Normalize over all pixels.
- const float spatialErr = (float)(spatialErrSum >> 2);
- const float spatialErrH = (float)(spatialErrHSum >> 1);
- const float spatialErrV = (float)(spatialErrVSum >> 1);
- const float norm = (float)pixelMSA;
+ const float spatialErr = static_cast<float>(spatialErrSum >> 2);
+ const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
+ const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
+ const float norm = static_cast<float>(pixelMSA);
// 2X2:
spatial_pred_err_ = spatialErr / norm;
@@ -260,7 +266,8 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
}
VideoContentMetrics* VPMContentAnalysis::ContentMetrics() {
- if (ca_Init_ == false) return NULL;
+ if (ca_Init_ == false)
+ return NULL;
content_metrics_->spatial_pred_err = spatial_pred_err_;
content_metrics_->spatial_pred_err_h = spatial_pred_err_h_;
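
The spatial metric above predicts each interior pixel from its four neighbors and accumulates three absolute prediction errors. Stripped of the loop and border handling, the per-pixel step reduces to the following (a sketch of the same arithmetic, not part of the patch):

  #include <cstdint>
  #include <cstdlib>

  // For a center pixel p with top/bottom/left/right neighbors t, b, l, r:
  //   |4p - (t+b+l+r)| feeds spatialErrSum  (2x2 predictor, normalized >> 2),
  //   |2p - (t+b)|     feeds spatialErrVSum (vertical,      normalized >> 1),
  //   |2p - (l+r)|     feeds spatialErrHSum (horizontal,    normalized >> 1).
  inline void AccumulateSpatialError(uint8_t p, uint8_t t, uint8_t b, uint8_t l,
                                     uint8_t r, uint32_t* err, uint32_t* err_v,
                                     uint32_t* err_h) {
    *err += std::abs(4 * p - (t + b + l + r));
    *err_v += std::abs(2 * p - (t + b));
    *err_h += std::abs(2 * p - (l + r));
  }
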
diff --git a/webrtc/modules/video_processing/main/source/content_analysis.h b/webrtc/modules/video_processing/content_analysis.h
index 510c1b4a55..d3a11bd091 100644
--- a/webrtc/modules/video_processing/main/source/content_analysis.h
+++ b/webrtc/modules/video_processing/content_analysis.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_CONTENT_ANALYSIS_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_CONTENT_ANALYSIS_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
@@ -72,16 +72,16 @@ class VPMContentAnalysis {
int border_;
// Content Metrics: Stores the local average of the metrics.
- float motion_magnitude_; // motion class
- float spatial_pred_err_; // spatial class
+ float motion_magnitude_; // motion class
+ float spatial_pred_err_; // spatial class
float spatial_pred_err_h_; // spatial class
float spatial_pred_err_v_; // spatial class
bool first_frame_;
bool ca_Init_;
- VideoContentMetrics* content_metrics_;
+ VideoContentMetrics* content_metrics_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_CONTENT_ANALYSIS_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
diff --git a/webrtc/modules/video_processing/main/source/content_analysis_sse2.cc b/webrtc/modules/video_processing/content_analysis_sse2.cc
index 17b64ff280..7a60a89b45 100644
--- a/webrtc/modules/video_processing/main/source/content_analysis_sse2.cc
+++ b/webrtc/modules/video_processing/content_analysis_sse2.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
#include <emmintrin.h>
#include <math.h>
@@ -16,22 +16,22 @@
namespace webrtc {
int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
- uint32_t num_pixels = 0; // counter for # of pixels
- const uint8_t* imgBufO = orig_frame_ + border_*width_ + border_;
- const uint8_t* imgBufP = prev_frame_ + border_*width_ + border_;
+ uint32_t num_pixels = 0; // counter for # of pixels
+ const uint8_t* imgBufO = orig_frame_ + border_ * width_ + border_;
+ const uint8_t* imgBufP = prev_frame_ + border_ * width_ + border_;
- const int32_t width_end = ((width_ - 2*border_) & -16) + border_;
+ const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
- __m128i sad_64 = _mm_setzero_si128();
- __m128i sum_64 = _mm_setzero_si128();
+ __m128i sad_64 = _mm_setzero_si128();
+ __m128i sum_64 = _mm_setzero_si128();
__m128i sqsum_64 = _mm_setzero_si128();
- const __m128i z = _mm_setzero_si128();
+ const __m128i z = _mm_setzero_si128();
- for (uint16_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
- __m128i sqsum_32 = _mm_setzero_si128();
+ for (uint16_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
+ __m128i sqsum_32 = _mm_setzero_si128();
- const uint8_t *lineO = imgBufO;
- const uint8_t *lineP = imgBufP;
+ const uint8_t* lineO = imgBufO;
+ const uint8_t* lineP = imgBufP;
// Work on 16 pixels at a time. For HD content with a width of 1920
// this loop will run ~67 times (depending on border). Maximum for
@@ -49,14 +49,14 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
lineP += 16;
// Abs pixel difference between frames.
- sad_64 = _mm_add_epi64 (sad_64, _mm_sad_epu8(o, p));
+ sad_64 = _mm_add_epi64(sad_64, _mm_sad_epu8(o, p));
// sum of all pixels in frame
- sum_64 = _mm_add_epi64 (sum_64, _mm_sad_epu8(o, z));
+ sum_64 = _mm_add_epi64(sum_64, _mm_sad_epu8(o, z));
// Squared sum of all pixels in frame.
- const __m128i olo = _mm_unpacklo_epi8(o,z);
- const __m128i ohi = _mm_unpackhi_epi8(o,z);
+ const __m128i olo = _mm_unpacklo_epi8(o, z);
+ const __m128i ohi = _mm_unpackhi_epi8(o, z);
const __m128i sqsum_32_lo = _mm_madd_epi16(olo, olo);
const __m128i sqsum_32_hi = _mm_madd_epi16(ohi, ohi);
@@ -66,9 +66,9 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
}
// Add to 64 bit running sum as to not roll over.
- sqsum_64 = _mm_add_epi64(sqsum_64,
- _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32,z),
- _mm_unpacklo_epi32(sqsum_32,z)));
+ sqsum_64 =
+ _mm_add_epi64(sqsum_64, _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32, z),
+ _mm_unpacklo_epi32(sqsum_32, z)));
imgBufO += width_ * skip_num_;
imgBufP += width_ * skip_num_;
@@ -81,13 +81,13 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
// Bring sums out of vector registers and into integer register
// domain, summing them along the way.
- _mm_store_si128 (&sad_final_128, sad_64);
- _mm_store_si128 (&sum_final_128, sum_64);
- _mm_store_si128 (&sqsum_final_128, sqsum_64);
+ _mm_store_si128(&sad_final_128, sad_64);
+ _mm_store_si128(&sum_final_128, sum_64);
+ _mm_store_si128(&sqsum_final_128, sqsum_64);
- uint64_t *sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
- uint64_t *sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
- uint64_t *sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
+ uint64_t* sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
+ uint64_t* sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
+ uint64_t* sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
const uint32_t pixelSum = sum_final_64[0] + sum_final_64[1];
const uint64_t pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
@@ -96,27 +96,31 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
// Default.
motion_magnitude_ = 0.0f;
- if (tempDiffSum == 0) return VPM_OK;
+ if (tempDiffSum == 0)
+ return VPM_OK;
// Normalize over all pixels.
- const float tempDiffAvg = (float)tempDiffSum / (float)(num_pixels);
- const float pixelSumAvg = (float)pixelSum / (float)(num_pixels);
- const float pixelSqSumAvg = (float)pixelSqSum / (float)(num_pixels);
+ const float tempDiffAvg =
+ static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
+ const float pixelSumAvg =
+ static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
+ const float pixelSqSumAvg =
+ static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
if (contrast > 0.0) {
contrast = sqrt(contrast);
- motion_magnitude_ = tempDiffAvg/contrast;
+ motion_magnitude_ = tempDiffAvg / contrast;
}
return VPM_OK;
}
int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
- const uint8_t* imgBuf = orig_frame_ + border_*width_;
+ const uint8_t* imgBuf = orig_frame_ + border_ * width_;
const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
- __m128i se_32 = _mm_setzero_si128();
+ __m128i se_32 = _mm_setzero_si128();
__m128i sev_32 = _mm_setzero_si128();
__m128i seh_32 = _mm_setzero_si128();
__m128i msa_32 = _mm_setzero_si128();
@@ -127,8 +131,8 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// value is maxed out at 65529 for every row, 65529*1080 = 70771320, which
// will not roll over a 32 bit accumulator.
// skip_num_ is also used to reduce the number of rows
- for (int32_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
- __m128i se_16 = _mm_setzero_si128();
+ for (int32_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
+ __m128i se_16 = _mm_setzero_si128();
__m128i sev_16 = _mm_setzero_si128();
__m128i seh_16 = _mm_setzero_si128();
__m128i msa_16 = _mm_setzero_si128();
@@ -143,9 +147,9 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// border_ could also be adjusted to concentrate on just the center of
// the images for an HD capture in order to reduce the possibility of
// rollover.
- const uint8_t *lineTop = imgBuf - width_ + border_;
- const uint8_t *lineCen = imgBuf + border_;
- const uint8_t *lineBot = imgBuf + width_ + border_;
+ const uint8_t* lineTop = imgBuf - width_ + border_;
+ const uint8_t* lineCen = imgBuf + border_;
+ const uint8_t* lineBot = imgBuf + width_ + border_;
for (int32_t j = 0; j < width_end - border_; j += 16) {
const __m128i t = _mm_loadu_si128((__m128i*)(lineTop));
@@ -159,20 +163,20 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
lineBot += 16;
// center pixel unpacked
- __m128i clo = _mm_unpacklo_epi8(c,z);
- __m128i chi = _mm_unpackhi_epi8(c,z);
+ __m128i clo = _mm_unpacklo_epi8(c, z);
+ __m128i chi = _mm_unpackhi_epi8(c, z);
// left right pixels unpacked and added together
- const __m128i lrlo = _mm_add_epi16(_mm_unpacklo_epi8(l,z),
- _mm_unpacklo_epi8(r,z));
- const __m128i lrhi = _mm_add_epi16(_mm_unpackhi_epi8(l,z),
- _mm_unpackhi_epi8(r,z));
+ const __m128i lrlo =
+ _mm_add_epi16(_mm_unpacklo_epi8(l, z), _mm_unpacklo_epi8(r, z));
+ const __m128i lrhi =
+ _mm_add_epi16(_mm_unpackhi_epi8(l, z), _mm_unpackhi_epi8(r, z));
// top & bottom pixels unpacked and added together
- const __m128i tblo = _mm_add_epi16(_mm_unpacklo_epi8(t,z),
- _mm_unpacklo_epi8(b,z));
- const __m128i tbhi = _mm_add_epi16(_mm_unpackhi_epi8(t,z),
- _mm_unpackhi_epi8(b,z));
+ const __m128i tblo =
+ _mm_add_epi16(_mm_unpacklo_epi8(t, z), _mm_unpacklo_epi8(b, z));
+ const __m128i tbhi =
+ _mm_add_epi16(_mm_unpackhi_epi8(t, z), _mm_unpackhi_epi8(b, z));
// running sum of all pixels
msa_16 = _mm_add_epi16(msa_16, _mm_add_epi16(chi, clo));
@@ -190,29 +194,32 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
const __m128i sethi = _mm_subs_epi16(chi, _mm_add_epi16(lrhi, tbhi));
// Add to 16 bit running sum
- se_16 = _mm_add_epi16(se_16, _mm_max_epi16(setlo,
- _mm_subs_epi16(z, setlo)));
- se_16 = _mm_add_epi16(se_16, _mm_max_epi16(sethi,
- _mm_subs_epi16(z, sethi)));
- sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevtlo,
- _mm_subs_epi16(z, sevtlo)));
- sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevthi,
- _mm_subs_epi16(z, sevthi)));
- seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehtlo,
- _mm_subs_epi16(z, sehtlo)));
- seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehthi,
- _mm_subs_epi16(z, sehthi)));
+ se_16 =
+ _mm_add_epi16(se_16, _mm_max_epi16(setlo, _mm_subs_epi16(z, setlo)));
+ se_16 =
+ _mm_add_epi16(se_16, _mm_max_epi16(sethi, _mm_subs_epi16(z, sethi)));
+ sev_16 = _mm_add_epi16(sev_16,
+ _mm_max_epi16(sevtlo, _mm_subs_epi16(z, sevtlo)));
+ sev_16 = _mm_add_epi16(sev_16,
+ _mm_max_epi16(sevthi, _mm_subs_epi16(z, sevthi)));
+ seh_16 = _mm_add_epi16(seh_16,
+ _mm_max_epi16(sehtlo, _mm_subs_epi16(z, sehtlo)));
+ seh_16 = _mm_add_epi16(seh_16,
+ _mm_max_epi16(sehthi, _mm_subs_epi16(z, sehthi)));
}
// Add to 32 bit running sum as to not roll over.
- se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16,z),
- _mm_unpacklo_epi16(se_16,z)));
- sev_32 = _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16,z),
- _mm_unpacklo_epi16(sev_16,z)));
- seh_32 = _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16,z),
- _mm_unpacklo_epi16(seh_16,z)));
- msa_32 = _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16,z),
- _mm_unpacklo_epi16(msa_16,z)));
+ se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16, z),
+ _mm_unpacklo_epi16(se_16, z)));
+ sev_32 =
+ _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16, z),
+ _mm_unpacklo_epi16(sev_16, z)));
+ seh_32 =
+ _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16, z),
+ _mm_unpacklo_epi16(seh_16, z)));
+ msa_32 =
+ _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16, z),
+ _mm_unpacklo_epi16(msa_16, z)));
imgBuf += width_ * skip_num_;
}
@@ -224,30 +231,30 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// Bring sums out of vector registers and into integer register
// domain, summing them along the way.
- _mm_store_si128 (&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32,z),
- _mm_unpacklo_epi32(se_32,z)));
- _mm_store_si128 (&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32,z),
- _mm_unpacklo_epi32(sev_32,z)));
- _mm_store_si128 (&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32,z),
- _mm_unpacklo_epi32(seh_32,z)));
- _mm_store_si128 (&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32,z),
- _mm_unpacklo_epi32(msa_32,z)));
-
- uint64_t *se_64 = reinterpret_cast<uint64_t*>(&se_128);
- uint64_t *sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
- uint64_t *seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
- uint64_t *msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
-
- const uint32_t spatialErrSum = se_64[0] + se_64[1];
+ _mm_store_si128(&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32, z),
+ _mm_unpacklo_epi32(se_32, z)));
+ _mm_store_si128(&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32, z),
+ _mm_unpacklo_epi32(sev_32, z)));
+ _mm_store_si128(&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32, z),
+ _mm_unpacklo_epi32(seh_32, z)));
+ _mm_store_si128(&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32, z),
+ _mm_unpacklo_epi32(msa_32, z)));
+
+ uint64_t* se_64 = reinterpret_cast<uint64_t*>(&se_128);
+ uint64_t* sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
+ uint64_t* seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
+ uint64_t* msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
+
+ const uint32_t spatialErrSum = se_64[0] + se_64[1];
const uint32_t spatialErrVSum = sev_64[0] + sev_64[1];
const uint32_t spatialErrHSum = seh_64[0] + seh_64[1];
const uint32_t pixelMSA = msa_64[0] + msa_64[1];
// Normalize over all pixels.
- const float spatialErr = (float)(spatialErrSum >> 2);
- const float spatialErrH = (float)(spatialErrHSum >> 1);
- const float spatialErrV = (float)(spatialErrVSum >> 1);
- const float norm = (float)pixelMSA;
+ const float spatialErr = static_cast<float>(spatialErrSum >> 2);
+ const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
+ const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
+ const float norm = static_cast<float>(pixelMSA);
// 2X2:
spatial_pred_err_ = spatialErr / norm;
@@ -258,7 +265,7 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// 2X1:
spatial_pred_err_v_ = spatialErrV / norm;
- return VPM_OK;
+ return VPM_OK;
}
} // namespace webrtc
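
Both the C and SSE2 temporal paths finish with the same normalization: the mean absolute frame difference is divided by the luminance contrast, where contrast is the standard deviation recovered from the running sum and squared sum. In scalar form (a sketch under those definitions, not part of the patch):

  #include <cmath>
  #include <cstdint>

  // sad:   sum of |curr - prev| over the sampled pixels.
  // sum:   sum of current-frame pixel values.
  // sqsum: sum of squared current-frame pixel values.
  float MotionMagnitude(uint64_t sad, uint64_t sum, uint64_t sqsum,
                        uint32_t num_pixels) {
    if (sad == 0 || num_pixels == 0)
      return 0.0f;
    const float mean_diff = static_cast<float>(sad) / num_pixels;
    const float mean = static_cast<float>(sum) / num_pixels;
    // Contrast is sqrt(E[x^2] - E[x]^2); guard the subtraction against
    // non-positive values as the implementation above does.
    const float variance =
        static_cast<float>(sqsum) / num_pixels - mean * mean;
    return variance > 0.0f ? mean_diff / std::sqrt(variance) : 0.0f;
  }
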
diff --git a/webrtc/modules/video_processing/main/source/deflickering.cc b/webrtc/modules/video_processing/deflickering.cc
index 19bc641ac9..0e936ce9b7 100644
--- a/webrtc/modules/video_processing/main/source/deflickering.cc
+++ b/webrtc/modules/video_processing/deflickering.cc
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/deflickering.h"
+#include "webrtc/modules/video_processing/deflickering.h"
#include <math.h>
#include <stdlib.h>
+#include "webrtc/base/logging.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/sort.h"
namespace webrtc {
@@ -40,16 +40,17 @@ enum { kLog2OfDownsamplingFactor = 3 };
// >> fprintf('%d, ', probUW16)
// Resolution reduced to avoid overflow when multiplying with the
// (potentially) large number of pixels.
-const uint16_t VPMDeflickering::prob_uw16_[kNumProbs] = {102, 205, 410, 614,
- 819, 1024, 1229, 1434, 1638, 1843, 1946, 1987}; // <Q11>
+const uint16_t VPMDeflickering::prob_uw16_[kNumProbs] = {
+ 102, 205, 410, 614, 819, 1024,
+ 1229, 1434, 1638, 1843, 1946, 1987}; // <Q11>
// To generate in Matlab:
// >> numQuants = 14; maxOnlyLength = 5;
// >> weightUW16 = round(2^15 *
// [linspace(0.5, 1.0, numQuants - maxOnlyLength)]);
// >> fprintf('%d, %d,\n ', weightUW16);
-const uint16_t VPMDeflickering::weight_uw16_[kNumQuants - kMaxOnlyLength] =
- {16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768}; // <Q15>
+const uint16_t VPMDeflickering::weight_uw16_[kNumQuants - kMaxOnlyLength] = {
+ 16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768}; // <Q15>
VPMDeflickering::VPMDeflickering() {
Reset();
@@ -70,8 +71,8 @@ void VPMDeflickering::Reset() {
quant_hist_uw8_[0][kNumQuants - 1] = 255;
for (int32_t i = 0; i < kNumProbs; i++) {
// Unsigned round. <Q0>
- quant_hist_uw8_[0][i + 1] = static_cast<uint8_t>(
- (prob_uw16_[i] * 255 + (1 << 10)) >> 11);
+ quant_hist_uw8_[0][i + 1] =
+ static_cast<uint8_t>((prob_uw16_[i] * 255 + (1 << 10)) >> 11);
}
for (int32_t i = 1; i < kFrameHistory_size; i++) {
@@ -80,9 +81,8 @@ void VPMDeflickering::Reset() {
}
}
-int32_t VPMDeflickering::ProcessFrame(
- VideoFrame* frame,
- VideoProcessingModule::FrameStats* stats) {
+int32_t VPMDeflickering::ProcessFrame(VideoFrame* frame,
+ VideoProcessing::FrameStats* stats) {
assert(frame);
uint32_t frame_memory;
uint8_t quant_uw8[kNumQuants];
@@ -107,11 +107,12 @@ int32_t VPMDeflickering::ProcessFrame(
return VPM_GENERAL_ERROR;
}
- if (!VideoProcessingModule::ValidFrameStats(*stats)) {
+ if (!VideoProcessing::ValidFrameStats(*stats)) {
return VPM_GENERAL_ERROR;
}
- if (PreDetection(frame->timestamp(), *stats) == -1) return VPM_GENERAL_ERROR;
+ if (PreDetection(frame->timestamp(), *stats) == -1)
+ return VPM_GENERAL_ERROR;
// Flicker detection
int32_t det_flicker = DetectFlicker();
@@ -124,13 +125,13 @@ int32_t VPMDeflickering::ProcessFrame(
// Size of luminance component.
const uint32_t y_size = height * width;
- const uint32_t y_sub_size = width * (((height - 1) >>
- kLog2OfDownsamplingFactor) + 1);
+ const uint32_t y_sub_size =
+ width * (((height - 1) >> kLog2OfDownsamplingFactor) + 1);
uint8_t* y_sorted = new uint8_t[y_sub_size];
uint32_t sort_row_idx = 0;
for (int i = 0; i < height; i += kDownsamplingFactor) {
- memcpy(y_sorted + sort_row_idx * width,
- frame->buffer(kYPlane) + i * width, width);
+ memcpy(y_sorted + sort_row_idx * width, frame->buffer(kYPlane) + i * width,
+ width);
sort_row_idx++;
}
@@ -153,12 +154,12 @@ int32_t VPMDeflickering::ProcessFrame(
quant_uw8[i + 1] = y_sorted[prob_idx_uw32];
}
- delete [] y_sorted;
+ delete[] y_sorted;
y_sorted = NULL;
// Shift history for new frame.
memmove(quant_hist_uw8_[1], quant_hist_uw8_[0],
- (kFrameHistory_size - 1) * kNumQuants * sizeof(uint8_t));
+ (kFrameHistory_size - 1) * kNumQuants * sizeof(uint8_t));
// Store current frame in history.
memcpy(quant_hist_uw8_[0], quant_uw8, kNumQuants * sizeof(uint8_t));
@@ -190,9 +191,10 @@ int32_t VPMDeflickering::ProcessFrame(
// target = w * maxquant_uw8 + (1 - w) * minquant_uw8
// Weights w = |weight_uw16_| are in Q15, hence the final output has to be
// right shifted by 8 to end up in Q7.
- target_quant_uw16[i] = static_cast<uint16_t>((
- weight_uw16_[i] * maxquant_uw8[i] +
- ((1 << 15) - weight_uw16_[i]) * minquant_uw8[i]) >> 8); // <Q7>
+ target_quant_uw16[i] = static_cast<uint16_t>(
+ (weight_uw16_[i] * maxquant_uw8[i] +
+ ((1 << 15) - weight_uw16_[i]) * minquant_uw8[i]) >>
+ 8); // <Q7>
}
for (int32_t i = kNumQuants - kMaxOnlyLength; i < kNumQuants; i++) {
@@ -203,13 +205,14 @@ int32_t VPMDeflickering::ProcessFrame(
uint16_t mapUW16; // <Q7>
for (int32_t i = 1; i < kNumQuants; i++) {
// As quant and targetQuant are limited to UWord8, it's safe to use Q7 here.
- tmp_uw32 = static_cast<uint32_t>(target_quant_uw16[i] -
- target_quant_uw16[i - 1]);
+ tmp_uw32 =
+ static_cast<uint32_t>(target_quant_uw16[i] - target_quant_uw16[i - 1]);
tmp_uw16 = static_cast<uint16_t>(quant_uw8[i] - quant_uw8[i - 1]); // <Q0>
if (tmp_uw16 > 0) {
- increment_uw16 = static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmp_uw32,
- tmp_uw16)); // <Q7>
+ increment_uw16 =
+ static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmp_uw32,
+ tmp_uw16)); // <Q7>
} else {
// The value is irrelevant; the loop below will only iterate once.
increment_uw16 = 0;
@@ -230,7 +233,7 @@ int32_t VPMDeflickering::ProcessFrame(
}
// Frame was altered, so reset stats.
- VideoProcessingModule::ClearFrameStats(stats);
+ VideoProcessing::ClearFrameStats(stats);
return VPM_OK;
}
@@ -247,8 +250,9 @@ int32_t VPMDeflickering::ProcessFrame(
zero.\n
-1: Error
*/
-int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
- const VideoProcessingModule::FrameStats& stats) {
+int32_t VPMDeflickering::PreDetection(
+ const uint32_t timestamp,
+ const VideoProcessing::FrameStats& stats) {
int32_t mean_val; // Mean value of frame (Q4)
uint32_t frame_rate = 0;
int32_t meanBufferLength; // Temp variable.
@@ -257,16 +261,16 @@ int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
// Update mean value buffer.
// This should be done even though we might end up in an unreliable detection.
memmove(mean_buffer_ + 1, mean_buffer_,
- (kMeanBufferLength - 1) * sizeof(int32_t));
+ (kMeanBufferLength - 1) * sizeof(int32_t));
mean_buffer_[0] = mean_val;
// Update timestamp buffer.
// This should be done even though we might end up in an unreliable detection.
- memmove(timestamp_buffer_ + 1, timestamp_buffer_, (kMeanBufferLength - 1) *
- sizeof(uint32_t));
+ memmove(timestamp_buffer_ + 1, timestamp_buffer_,
+ (kMeanBufferLength - 1) * sizeof(uint32_t));
timestamp_buffer_[0] = timestamp;
-/* Compute current frame rate (Q4) */
+ /* Compute current frame rate (Q4) */
if (timestamp_buffer_[kMeanBufferLength - 1] != 0) {
frame_rate = ((90000 << 4) * (kMeanBufferLength - 1));
frame_rate /=
@@ -315,22 +319,22 @@ int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
-1: Error
*/
int32_t VPMDeflickering::DetectFlicker() {
- uint32_t i;
- int32_t freqEst; // (Q4) Frequency estimate to base detection upon
- int32_t ret_val = -1;
+ uint32_t i;
+ int32_t freqEst; // (Q4) Frequency estimate to base detection upon
+ int32_t ret_val = -1;
/* Sanity check for mean_buffer_length_ */
if (mean_buffer_length_ < 2) {
/* Not possible to estimate frequency */
- return(2);
+ return 2;
}
// Count zero crossings with a dead zone to be robust against noise. If the
// noise std is 2 pixel this corresponds to about 95% confidence interval.
int32_t deadzone = (kZeroCrossingDeadzone << kmean_valueScaling); // Q4
int32_t meanOfBuffer = 0; // Mean value of mean value buffer.
- int32_t numZeros = 0; // Number of zeros that cross the dead-zone.
- int32_t cntState = 0; // State variable for zero crossing regions.
- int32_t cntStateOld = 0; // Previous state for zero crossing regions.
+ int32_t numZeros = 0; // Number of zeros that cross the dead-zone.
+ int32_t cntState = 0; // State variable for zero crossing regions.
+ int32_t cntStateOld = 0; // Previous state for zero crossing regions.
for (i = 0; i < mean_buffer_length_; i++) {
meanOfBuffer += mean_buffer_[i];
@@ -371,7 +375,7 @@ int32_t VPMDeflickering::DetectFlicker() {
int32_t freqAlias = freqEst;
if (freqEst > kMinFrequencyToDetect) {
uint8_t aliasState = 1;
- while(freqState == 0) {
+ while (freqState == 0) {
/* Increase frequency */
freqAlias += (aliasState * frame_rate_);
freqAlias += ((freqEst << 1) * (1 - (aliasState << 1)));
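
The target quantile computation in ProcessFrame() above is pure fixed-point: |weight_uw16_| is Q15, the quantiles are Q0 bytes, and right-shifting the Q15 product by 8 yields the Q7 target. Isolated, the blend looks like this (hypothetical helper, not part of the patch):

  #include <cstdint>

  // w_q15 is a Q15 weight in [0, 1 << 15]; max_q and min_q are Q0 (0..255).
  // Computes w * max_q + (1 - w) * min_q, i.e. the target_quant_uw16 step:
  // (Q15 * Q0) >> 8 lands in Q7.
  uint16_t BlendQuantilesQ7(uint16_t w_q15, uint8_t max_q, uint8_t min_q) {
    const uint32_t acc_q15 = static_cast<uint32_t>(w_q15) * max_q +
                             ((1u << 15) - w_q15) * min_q;
    return static_cast<uint16_t>(acc_q15 >> 8);  // Q15 -> Q7.
  }
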
diff --git a/webrtc/modules/video_processing/main/source/deflickering.h b/webrtc/modules/video_processing/deflickering.h
index 36e6845d71..3ff2723aba 100644
--- a/webrtc/modules/video_processing/main/source/deflickering.h
+++ b/webrtc/modules/video_processing/deflickering.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCEdeflickering__H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCEdeflickering__H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
#include <string.h> // NULL
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -24,12 +24,11 @@ class VPMDeflickering {
~VPMDeflickering();
void Reset();
- int32_t ProcessFrame(VideoFrame* frame,
- VideoProcessingModule::FrameStats* stats);
+ int32_t ProcessFrame(VideoFrame* frame, VideoProcessing::FrameStats* stats);
private:
int32_t PreDetection(uint32_t timestamp,
- const VideoProcessingModule::FrameStats& stats);
+ const VideoProcessing::FrameStats& stats);
int32_t DetectFlicker();
@@ -39,13 +38,13 @@ class VPMDeflickering {
enum { kNumQuants = kNumProbs + 2 };
enum { kMaxOnlyLength = 5 };
- uint32_t mean_buffer_length_;
- uint8_t detection_state_; // 0: No flickering
- // 1: Flickering detected
- // 2: In flickering
- int32_t mean_buffer_[kMeanBufferLength];
- uint32_t timestamp_buffer_[kMeanBufferLength];
- uint32_t frame_rate_;
+ uint32_t mean_buffer_length_;
+ uint8_t detection_state_; // 0: No flickering
+ // 1: Flickering detected
+ // 2: In flickering
+ int32_t mean_buffer_[kMeanBufferLength];
+ uint32_t timestamp_buffer_[kMeanBufferLength];
+ uint32_t frame_rate_;
static const uint16_t prob_uw16_[kNumProbs];
static const uint16_t weight_uw16_[kNumQuants - kMaxOnlyLength];
uint8_t quant_hist_uw8_[kFrameHistory_size][kNumQuants];
@@ -53,4 +52,4 @@ class VPMDeflickering {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCEdeflickering__H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
diff --git a/webrtc/modules/video_processing/main/source/frame_preprocessor.cc b/webrtc/modules/video_processing/frame_preprocessor.cc
index a9d77c2e0c..6778a597be 100644
--- a/webrtc/modules/video_processing/main/source/frame_preprocessor.cc
+++ b/webrtc/modules/video_processing/frame_preprocessor.cc
@@ -8,12 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/frame_preprocessor.h"
+#include "webrtc/modules/video_processing/frame_preprocessor.h"
+
+#include "webrtc/modules/video_processing/video_denoiser.h"
namespace webrtc {
VPMFramePreprocessor::VPMFramePreprocessor()
- : content_metrics_(NULL),
+ : content_metrics_(nullptr),
resampled_frame_(),
enable_ca_(false),
frame_cnt_(0) {
@@ -24,21 +26,21 @@ VPMFramePreprocessor::VPMFramePreprocessor()
VPMFramePreprocessor::~VPMFramePreprocessor() {
Reset();
- delete spatial_resampler_;
delete ca_;
delete vd_;
+ delete spatial_resampler_;
}
-void VPMFramePreprocessor::Reset() {
+void VPMFramePreprocessor::Reset() {
ca_->Release();
vd_->Reset();
- content_metrics_ = NULL;
+ content_metrics_ = nullptr;
spatial_resampler_->Reset();
enable_ca_ = false;
frame_cnt_ = 0;
}
-void VPMFramePreprocessor::EnableTemporalDecimation(bool enable) {
+void VPMFramePreprocessor::EnableTemporalDecimation(bool enable) {
vd_->EnableTemporalDecimation(enable);
}
@@ -46,20 +48,22 @@ void VPMFramePreprocessor::EnableContentAnalysis(bool enable) {
enable_ca_ = enable;
}
-void VPMFramePreprocessor::SetInputFrameResampleMode(
+void VPMFramePreprocessor::SetInputFrameResampleMode(
VideoFrameResampling resampling_mode) {
spatial_resampler_->SetInputFrameResampleMode(resampling_mode);
}
-int32_t VPMFramePreprocessor::SetTargetResolution(
- uint32_t width, uint32_t height, uint32_t frame_rate) {
- if ( (width == 0) || (height == 0) || (frame_rate == 0)) {
+int32_t VPMFramePreprocessor::SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) {
+ if ((width == 0) || (height == 0) || (frame_rate == 0)) {
return VPM_PARAMETER_ERROR;
}
int32_t ret_val = 0;
ret_val = spatial_resampler_->SetTargetFrameSize(width, height);
- if (ret_val < 0) return ret_val;
+ if (ret_val < 0)
+ return ret_val;
vd_->SetTargetFramerate(frame_rate);
return VPM_OK;
@@ -78,59 +82,60 @@ void VPMFramePreprocessor::UpdateIncomingframe_rate() {
vd_->UpdateIncomingframe_rate();
}
-uint32_t VPMFramePreprocessor::Decimatedframe_rate() {
- return vd_->Decimatedframe_rate();
+uint32_t VPMFramePreprocessor::GetDecimatedFrameRate() {
+ return vd_->GetDecimatedFrameRate();
}
-
-uint32_t VPMFramePreprocessor::DecimatedWidth() const {
+uint32_t VPMFramePreprocessor::GetDecimatedWidth() const {
return spatial_resampler_->TargetWidth();
}
-
-uint32_t VPMFramePreprocessor::DecimatedHeight() const {
+uint32_t VPMFramePreprocessor::GetDecimatedHeight() const {
return spatial_resampler_->TargetHeight();
}
-int32_t VPMFramePreprocessor::PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame) {
+void VPMFramePreprocessor::EnableDenosing(bool enable) {
+ denoiser_.reset(new VideoDenoiser(true));
+}
+
+const VideoFrame* VPMFramePreprocessor::PreprocessFrame(
+ const VideoFrame& frame) {
if (frame.IsZeroSize()) {
- return VPM_PARAMETER_ERROR;
+ return nullptr;
}
vd_->UpdateIncomingframe_rate();
-
if (vd_->DropFrame()) {
- return 1; // drop 1 frame
+ return nullptr;
}
- // Resizing incoming frame if needed. Otherwise, remains NULL.
- // We are not allowed to resample the input frame (must make a copy of it).
- *processed_frame = NULL;
- if (spatial_resampler_->ApplyResample(frame.width(), frame.height())) {
- int32_t ret = spatial_resampler_->ResampleFrame(frame, &resampled_frame_);
- if (ret != VPM_OK) return ret;
- *processed_frame = &resampled_frame_;
+ const VideoFrame* current_frame = &frame;
+ if (denoiser_) {
+ denoiser_->DenoiseFrame(*current_frame, &denoised_frame_);
+ current_frame = &denoised_frame_;
+ }
+
+ if (spatial_resampler_->ApplyResample(current_frame->width(),
+ current_frame->height())) {
+ if (spatial_resampler_->ResampleFrame(*current_frame, &resampled_frame_) !=
+ VPM_OK) {
+ return nullptr;
+ }
+ current_frame = &resampled_frame_;
}
// Perform content analysis on the frame to be encoded.
- if (enable_ca_) {
+ if (enable_ca_ && frame_cnt_ % kSkipFrameCA == 0) {
// Compute new metrics every |kSkipFramesCA| frames, starting with
// the first frame.
- if (frame_cnt_ % kSkipFrameCA == 0) {
- if (*processed_frame == NULL) {
- content_metrics_ = ca_->ComputeContentMetrics(frame);
- } else {
- content_metrics_ = ca_->ComputeContentMetrics(resampled_frame_);
- }
- }
- ++frame_cnt_;
+ content_metrics_ = ca_->ComputeContentMetrics(*current_frame);
}
- return VPM_OK;
+ ++frame_cnt_;
+ return current_frame;
}
-VideoContentMetrics* VPMFramePreprocessor::ContentMetrics() const {
+VideoContentMetrics* VPMFramePreprocessor::GetContentMetrics() const {
return content_metrics_;
}
-} // namespace
+} // namespace webrtc
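
This change moves PreprocessFrame() from an out-parameter plus error code to a pointer return: nullptr means the frame was invalid or dropped by the decimator, otherwise the returned pointer is the frame to encode (the input itself, or the internal denoised/resampled copy). A caller sketch under that contract (Encode() and the surrounding names are hypothetical):

  void Encode(const webrtc::VideoFrame& frame);  // Hypothetical encoder hook.

  void OnIncomingFrame(webrtc::VPMFramePreprocessor* preprocessor,
                       const webrtc::VideoFrame& frame) {
    const webrtc::VideoFrame* to_encode = preprocessor->PreprocessFrame(frame);
    if (to_encode == nullptr)
      return;  // Dropped by temporal decimation, or the frame was invalid.
    Encode(*to_encode);
  }
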
diff --git a/webrtc/modules/video_processing/main/source/frame_preprocessor.h b/webrtc/modules/video_processing/frame_preprocessor.h
index 895e457cc6..5bdc576f37 100644
--- a/webrtc/modules/video_processing/main/source/frame_preprocessor.h
+++ b/webrtc/modules/video_processing/frame_preprocessor.h
@@ -8,20 +8,23 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-/*
- * frame_preprocessor.h
- */
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_FRAME_PREPROCESSOR_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_FRAME_PREPROCESSOR_H
-
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
-#include "webrtc/modules/video_processing/main/source/spatial_resampler.h"
-#include "webrtc/modules/video_processing/main/source/video_decimator.h"
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_FRAME_PREPROCESSOR_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_FRAME_PREPROCESSOR_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
+#include "webrtc/modules/video_processing/spatial_resampler.h"
+#include "webrtc/modules/video_processing/video_decimator.h"
#include "webrtc/typedefs.h"
+#include "webrtc/video_frame.h"
namespace webrtc {
+class VideoDenoiser;
+
+// All pointers/members in this class are assumed to be protected by the class
+// owner.
class VPMFramePreprocessor {
public:
VPMFramePreprocessor();
@@ -38,7 +41,8 @@ class VPMFramePreprocessor {
void EnableContentAnalysis(bool enable);
// Set target resolution: frame rate and dimension.
- int32_t SetTargetResolution(uint32_t width, uint32_t height,
+ int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
uint32_t frame_rate);
// Set target frame rate.
@@ -50,14 +54,14 @@ class VPMFramePreprocessor {
int32_t updateIncomingFrameSize(uint32_t width, uint32_t height);
// Set decimated values: frame rate/dimension.
- uint32_t Decimatedframe_rate();
- uint32_t DecimatedWidth() const;
- uint32_t DecimatedHeight() const;
+ uint32_t GetDecimatedFrameRate();
+ uint32_t GetDecimatedWidth() const;
+ uint32_t GetDecimatedHeight() const;
// Preprocess output:
- int32_t PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame);
- VideoContentMetrics* ContentMetrics() const;
+ void EnableDenosing(bool enable);
+ const VideoFrame* PreprocessFrame(const VideoFrame& frame);
+ VideoContentMetrics* GetContentMetrics() const;
private:
// The content does not change so much every frame, so to reduce complexity
@@ -65,15 +69,16 @@ class VPMFramePreprocessor {
enum { kSkipFrameCA = 2 };
VideoContentMetrics* content_metrics_;
+ VideoFrame denoised_frame_;
VideoFrame resampled_frame_;
VPMSpatialResampler* spatial_resampler_;
VPMContentAnalysis* ca_;
VPMVideoDecimator* vd_;
+ rtc::scoped_ptr<VideoDenoiser> denoiser_;
bool enable_ca_;
- int frame_cnt_;
-
+ uint32_t frame_cnt_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_FRAME_PREPROCESSOR_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_FRAME_PREPROCESSOR_H_
diff --git a/webrtc/modules/video_processing/include/video_processing.h b/webrtc/modules/video_processing/include/video_processing.h
new file mode 100644
index 0000000000..a8d6358887
--- /dev/null
+++ b/webrtc/modules/video_processing/include/video_processing.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
+#include "webrtc/video_frame.h"
+
+// The module is largely intended to process video streams, except functionality
+// provided by static functions, which operate independently of previous
+// frames. It is recommended, but not required, that a unique instance be
+// used for each
+// concurrently processed stream. Similarly, it is recommended to call Reset()
+// before switching to a new stream, but this is not absolutely required.
+//
+// The module provides basic thread safety by permitting only a single function
+// to execute concurrently.
+
+namespace webrtc {
+
+class VideoProcessing {
+ public:
+ struct FrameStats {
+ uint32_t hist[256]; // Frame histogram.
+ uint32_t mean;
+ uint32_t sum;
+ uint32_t num_pixels;
+ uint32_t sub_sampling_factor; // Sub-sampling factor, in powers of 2.
+ };
+
+ enum BrightnessWarning { kNoWarning, kDarkWarning, kBrightWarning };
+
+ static VideoProcessing* Create();
+ virtual ~VideoProcessing() {}
+
+ // Retrieves statistics for the input frame. This function must be used to
+ // prepare a FrameStats struct for use in certain VPM functions.
+ static void GetFrameStats(const VideoFrame& frame, FrameStats* stats);
+
+ // Checks the validity of a FrameStats struct. Currently, valid implies only
+ // that it has changed from its initialized state.
+ static bool ValidFrameStats(const FrameStats& stats);
+
+ static void ClearFrameStats(FrameStats* stats);
+
+ // Increases/decreases the luminance value. 'delta' can be in the range {}
+ static void Brighten(int delta, VideoFrame* frame);
+
+ // Detects and removes camera flicker from a video stream. Every frame from
+ // the stream must be passed in. A frame will only be altered if flicker has
+ // been detected. Has a fixed-point implementation.
+ // Frame statistics provided by GetFrameStats(). On return the stats will
+ // be reset to zero if the frame was altered. Call GetFrameStats() again
+ // if the statistics for the altered frame are required.
+ virtual int32_t Deflickering(VideoFrame* frame, FrameStats* stats) = 0;
+
+ // Detects if a video frame is excessively bright or dark. Returns a
+ // warning if this is the case. Multiple frames should be passed in before
+ // expecting a warning. Has a floating-point implementation.
+ virtual int32_t BrightnessDetection(const VideoFrame& frame,
+ const FrameStats& stats) = 0;
+
+  // The following functions refer to the pre-processor unit within VPM. The
+  // pre-processor performs spatial/temporal decimation and content analysis
+  // on the frames prior to encoding.
+
+  // Enables/disables temporal decimation.
+ virtual void EnableTemporalDecimation(bool enable) = 0;
+
+ virtual int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) = 0;
+
+ virtual void SetTargetFramerate(int frame_rate) = 0;
+
+ virtual uint32_t GetDecimatedFrameRate() = 0;
+ virtual uint32_t GetDecimatedWidth() const = 0;
+ virtual uint32_t GetDecimatedHeight() const = 0;
+
+ // Set the spatial resampling settings of the VPM according to
+ // VideoFrameResampling.
+ virtual void SetInputFrameResampleMode(
+ VideoFrameResampling resampling_mode) = 0;
+
+ virtual void EnableDenosing(bool enable) = 0;
+ virtual const VideoFrame* PreprocessFrame(const VideoFrame& frame) = 0;
+
+ virtual VideoContentMetrics* GetContentMetrics() const = 0;
+ virtual void EnableContentAnalysis(bool enable) = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
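A minimal usage sketch of the interface added above, assuming only the declarations in this new header. ProcessOneFrame and the exposure remark are illustrative, not part of the module:

    #include "webrtc/modules/video_processing/include/video_processing.h"

    // Hypothetical caller: deflicker one frame, then check its brightness.
    // Create() returns a raw pointer the caller owns and must delete.
    void ProcessOneFrame(webrtc::VideoFrame* frame) {
      webrtc::VideoProcessing* vp = webrtc::VideoProcessing::Create();
      webrtc::VideoProcessing::FrameStats stats;
      webrtc::VideoProcessing::GetFrameStats(*frame, &stats);
      if (webrtc::VideoProcessing::ValidFrameStats(stats)) {
        vp->Deflickering(frame, &stats);
        // Deflickering() zeroes the stats if it altered the frame, so
        // refresh them before brightness detection.
        webrtc::VideoProcessing::GetFrameStats(*frame, &stats);
        if (vp->BrightnessDetection(*frame, stats) ==
            webrtc::VideoProcessing::kDarkWarning) {
          // React to a persistently dark stream, e.g. adjust exposure.
        }
      }
      delete vp;
    }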
diff --git a/webrtc/modules/video_processing/main/interface/video_processing_defines.h b/webrtc/modules/video_processing/include/video_processing_defines.h
index 93a0658966..9cc71bde27 100644
--- a/webrtc/modules/video_processing/main/interface/video_processing_defines.h
+++ b/webrtc/modules/video_processing/include/video_processing_defines.h
@@ -13,29 +13,29 @@
* This header file includes the definitions used in the video processor module
*/
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_DEFINES_H
-#define WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_DEFINES_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_DEFINES_H_
#include "webrtc/typedefs.h"
namespace webrtc {
// Error codes
-#define VPM_OK 0
-#define VPM_GENERAL_ERROR -1
-#define VPM_MEMORY -2
-#define VPM_PARAMETER_ERROR -3
-#define VPM_SCALE_ERROR -4
-#define VPM_UNINITIALIZED -5
-#define VPM_UNIMPLEMENTED -6
+#define VPM_OK 0
+#define VPM_GENERAL_ERROR -1
+#define VPM_MEMORY -2
+#define VPM_PARAMETER_ERROR -3
+#define VPM_SCALE_ERROR -4
+#define VPM_UNINITIALIZED -5
+#define VPM_UNIMPLEMENTED -6
enum VideoFrameResampling {
- kNoRescaling, // Disables rescaling.
- kFastRescaling, // Point filter.
- kBiLinear, // Bi-linear interpolation.
- kBox, // Box inteprolation.
+ kNoRescaling, // Disables rescaling.
+ kFastRescaling, // Point filter.
+ kBiLinear, // Bi-linear interpolation.
+  kBox,            // Box interpolation.
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_DEFINES_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_DEFINES_H_
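The error codes above are plain #defines returned through int32_t. A small illustrative helper for logging them by name; VpmErrorName is hypothetical and not part of the module:

    #include "webrtc/modules/video_processing/include/video_processing_defines.h"

    // Illustrative helper: translate a VPM return code into a readable name.
    const char* VpmErrorName(int32_t code) {
      switch (code) {
        case VPM_OK:              return "VPM_OK";
        case VPM_GENERAL_ERROR:   return "VPM_GENERAL_ERROR";
        case VPM_MEMORY:          return "VPM_MEMORY";
        case VPM_PARAMETER_ERROR: return "VPM_PARAMETER_ERROR";
        case VPM_SCALE_ERROR:     return "VPM_SCALE_ERROR";
        case VPM_UNINITIALIZED:   return "VPM_UNINITIALIZED";
        case VPM_UNIMPLEMENTED:   return "VPM_UNIMPLEMENTED";
        default:                  return "unknown";
      }
    }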
diff --git a/webrtc/modules/video_processing/main/interface/video_processing.h b/webrtc/modules/video_processing/main/interface/video_processing.h
deleted file mode 100644
index 30af99fb8e..0000000000
--- a/webrtc/modules/video_processing/main/interface/video_processing.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-/*
- * video_processing.h
- * This header file contains the API required for the video
- * processing module class.
- */
-
-
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_H
-#define WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_H
-
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing_defines.h"
-#include "webrtc/video_frame.h"
-
-/**
- The module is largely intended to process video streams, except functionality
- provided by static functions which operate independent of previous frames. It
- is recommended, but not required that a unique instance be used for each
- concurrently processed stream. Similarly, it is recommended to call Reset()
- before switching to a new stream, but this is not absolutely required.
-
- The module provides basic thread safety by permitting only a single function
- to execute concurrently.
-*/
-
-namespace webrtc {
-
-class VideoProcessingModule : public Module {
- public:
- /**
- Structure to hold frame statistics. Populate it with GetFrameStats().
- */
- struct FrameStats {
- FrameStats() :
- mean(0),
- sum(0),
- num_pixels(0),
- subSamplWidth(0),
- subSamplHeight(0) {
- memset(hist, 0, sizeof(hist));
- }
-
- uint32_t hist[256]; // FRame histogram.
- uint32_t mean; // Frame Mean value.
- uint32_t sum; // Sum of frame.
- uint32_t num_pixels; // Number of pixels.
- uint8_t subSamplWidth; // Subsampling rate of width in powers of 2.
- uint8_t subSamplHeight; // Subsampling rate of height in powers of 2.
-};
-
- /**
- Specifies the warning types returned by BrightnessDetection().
- */
- enum BrightnessWarning {
- kNoWarning, // Frame has acceptable brightness.
- kDarkWarning, // Frame is too dark.
- kBrightWarning // Frame is too bright.
- };
-
- /*
- Creates a VPM object.
-
- \param[in] id
- Unique identifier of this object.
-
- \return Pointer to a VPM object.
- */
- static VideoProcessingModule* Create();
-
- /**
- Destroys a VPM object.
-
- \param[in] module
- Pointer to the VPM object to destroy.
- */
- static void Destroy(VideoProcessingModule* module);
-
- /**
- Not supported.
- */
- int64_t TimeUntilNextProcess() override { return -1; }
-
- /**
- Not supported.
- */
- int32_t Process() override { return -1; }
-
- /**
- Resets all processing components to their initial states. This should be
- called whenever a new video stream is started.
- */
- virtual void Reset() = 0;
-
- /**
- Retrieves statistics for the input frame. This function must be used to
- prepare a FrameStats struct for use in certain VPM functions.
-
- \param[out] stats
- The frame statistics will be stored here on return.
-
- \param[in] frame
- Reference to the video frame.
-
- \return 0 on success, -1 on failure.
- */
- static int32_t GetFrameStats(FrameStats* stats, const VideoFrame& frame);
-
- /**
- Checks the validity of a FrameStats struct. Currently, valid implies only
- that is had changed from its initialized state.
-
- \param[in] stats
- Frame statistics.
-
- \return True on valid stats, false on invalid stats.
- */
- static bool ValidFrameStats(const FrameStats& stats);
-
- /**
- Returns a FrameStats struct to its intialized state.
-
- \param[in,out] stats
- Frame statistics.
- */
- static void ClearFrameStats(FrameStats* stats);
-
- /**
- Increases/decreases the luminance value.
-
- \param[in,out] frame
- Pointer to the video frame.
-
- \param[in] delta
- The amount to change the chrominance value of every single pixel.
- Can be < 0 also.
-
- \return 0 on success, -1 on failure.
- */
- static int32_t Brighten(VideoFrame* frame, int delta);
-
- /**
- Detects and removes camera flicker from a video stream. Every frame from
- the stream must be passed in. A frame will only be altered if flicker has
- been detected. Has a fixed-point implementation.
-
- \param[in,out] frame
- Pointer to the video frame.
-
- \param[in,out] stats
- Frame statistics provided by GetFrameStats(). On return the stats will
- be reset to zero if the frame was altered. Call GetFrameStats() again
- if the statistics for the altered frame are required.
-
- \return 0 on success, -1 on failure.
- */
- virtual int32_t Deflickering(VideoFrame* frame, FrameStats* stats) = 0;
-
- /**
- Detects if a video frame is excessively bright or dark. Returns a
- warning if this is the case. Multiple frames should be passed in before
- expecting a warning. Has a floating-point implementation.
-
- \param[in] frame
- Pointer to the video frame.
-
- \param[in] stats
- Frame statistics provided by GetFrameStats().
-
- \return A member of BrightnessWarning on success, -1 on error
- */
- virtual int32_t BrightnessDetection(const VideoFrame& frame,
- const FrameStats& stats) = 0;
-
- /**
- The following functions refer to the pre-processor unit within VPM. The
- pre-processor perfoms spatial/temporal decimation and content analysis on
- the frames prior to encoding.
- */
-
- /**
- Enable/disable temporal decimation
-
- \param[in] enable when true, temporal decimation is enabled
- */
- virtual void EnableTemporalDecimation(bool enable) = 0;
-
- /**
- Set target resolution
-
- \param[in] width
- Target width
-
- \param[in] height
- Target height
-
- \param[in] frame_rate
- Target frame_rate
-
- \return VPM_OK on success, a negative value on error (see error codes)
-
- */
- virtual int32_t SetTargetResolution(uint32_t width,
- uint32_t height,
- uint32_t frame_rate) = 0;
-
- virtual void SetTargetFramerate(int frame_rate) {}
-
- /**
- Get decimated(target) frame rate
- */
- virtual uint32_t Decimatedframe_rate() = 0;
-
- /**
- Get decimated(target) frame width
- */
- virtual uint32_t DecimatedWidth() const = 0;
-
- /**
- Get decimated(target) frame height
- */
- virtual uint32_t DecimatedHeight() const = 0 ;
-
- /**
- Set the spatial resampling settings of the VPM: The resampler may either be
- disabled or one of the following:
- scaling to a close to target dimension followed by crop/pad
-
- \param[in] resampling_mode
- Set resampling mode (a member of VideoFrameResampling)
- */
- virtual void SetInputFrameResampleMode(VideoFrameResampling
- resampling_mode) = 0;
-
- /**
- Get Processed (decimated) frame
-
- \param[in] frame pointer to the video frame.
- \param[in] processed_frame pointer (double) to the processed frame. If no
- processing is required, processed_frame will be NULL.
-
- \return VPM_OK on success, a negative value on error (see error codes)
- */
- virtual int32_t PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame) = 0;
-
- /**
- Return content metrics for the last processed frame
- */
- virtual VideoContentMetrics* ContentMetrics() const = 0 ;
-
- /**
- Enable content analysis
- */
- virtual void EnableContentAnalysis(bool enable) = 0;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_H
diff --git a/webrtc/modules/video_processing/main/source/OWNERS b/webrtc/modules/video_processing/main/source/OWNERS
deleted file mode 100644
index 3ee6b4bf5f..0000000000
--- a/webrtc/modules/video_processing/main/source/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gyp=*
-per-file *.gypi=*
diff --git a/webrtc/modules/video_processing/main/source/brighten.cc b/webrtc/modules/video_processing/main/source/brighten.cc
deleted file mode 100644
index 1fe813e7b0..0000000000
--- a/webrtc/modules/video_processing/main/source/brighten.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_processing/main/source/brighten.h"
-
-#include <stdlib.h>
-
-namespace webrtc {
-namespace VideoProcessing {
-
-int32_t Brighten(VideoFrame* frame, int delta) {
- assert(frame);
- if (frame->IsZeroSize()) {
- return VPM_PARAMETER_ERROR;
- }
- if (frame->width() <= 0 || frame->height() <= 0) {
- return VPM_PARAMETER_ERROR;
- }
-
- int num_pixels = frame->width() * frame->height();
-
- int look_up[256];
- for (int i = 0; i < 256; i++) {
- int val = i + delta;
- look_up[i] = ((((val < 0) ? 0 : val) > 255) ? 255 : val);
- }
-
- uint8_t* temp_ptr = frame->buffer(kYPlane);
-
- for (int i = 0; i < num_pixels; i++) {
- *temp_ptr = static_cast<uint8_t>(look_up[*temp_ptr]);
- temp_ptr++;
- }
- return VPM_OK;
-}
-
-} // namespace VideoProcessing
-} // namespace webrtc
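The deleted Brighten() above shows the usual trick for per-pixel level shifts: hoist the clamp into a 256-entry look-up table built once per call, then apply it with a single table read per pixel. A standalone sketch of the same technique over a bare luma buffer; BrightenPlane is illustrative:

    #include <stddef.h>
    #include <stdint.h>

    // Build the clamped mapping once, then remap every pixel through it.
    void BrightenPlane(uint8_t* y_plane, size_t num_pixels, int delta) {
      uint8_t look_up[256];
      for (int i = 0; i < 256; ++i) {
        int val = i + delta;
        look_up[i] =
            static_cast<uint8_t>(val < 0 ? 0 : (val > 255 ? 255 : val));
      }
      for (size_t i = 0; i < num_pixels; ++i)
        y_plane[i] = look_up[y_plane[i]];
    }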
diff --git a/webrtc/modules/video_processing/main/source/brighten.h b/webrtc/modules/video_processing/main/source/brighten.h
deleted file mode 100644
index 151d7a3b51..0000000000
--- a/webrtc/modules/video_processing/main/source/brighten.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTEN_H_
-#define MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTEN_H_
-
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-namespace VideoProcessing {
-
-int32_t Brighten(VideoFrame* frame, int delta);
-
-} // namespace VideoProcessing
-} // namespace webrtc
-
-#endif // MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTEN_H_
diff --git a/webrtc/modules/video_processing/main/source/video_processing_impl.cc b/webrtc/modules/video_processing/main/source/video_processing_impl.cc
deleted file mode 100644
index eaaf14f6ad..0000000000
--- a/webrtc/modules/video_processing/main/source/video_processing_impl.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include "webrtc/modules/video_processing/main/source/video_processing_impl.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
-
-#include <assert.h>
-
-namespace webrtc {
-
-namespace {
-void SetSubSampling(VideoProcessingModule::FrameStats* stats,
- const int32_t width,
- const int32_t height) {
- if (width * height >= 640 * 480) {
- stats->subSamplWidth = 3;
- stats->subSamplHeight = 3;
- } else if (width * height >= 352 * 288) {
- stats->subSamplWidth = 2;
- stats->subSamplHeight = 2;
- } else if (width * height >= 176 * 144) {
- stats->subSamplWidth = 1;
- stats->subSamplHeight = 1;
- } else {
- stats->subSamplWidth = 0;
- stats->subSamplHeight = 0;
- }
-}
-} // namespace
-
-VideoProcessingModule* VideoProcessingModule::Create() {
- return new VideoProcessingModuleImpl();
-}
-
-void VideoProcessingModule::Destroy(VideoProcessingModule* module) {
- if (module)
- delete static_cast<VideoProcessingModuleImpl*>(module);
-}
-
-VideoProcessingModuleImpl::VideoProcessingModuleImpl() {}
-VideoProcessingModuleImpl::~VideoProcessingModuleImpl() {}
-
-void VideoProcessingModuleImpl::Reset() {
- rtc::CritScope mutex(&mutex_);
- deflickering_.Reset();
- brightness_detection_.Reset();
- frame_pre_processor_.Reset();
-}
-
-int32_t VideoProcessingModule::GetFrameStats(FrameStats* stats,
- const VideoFrame& frame) {
- if (frame.IsZeroSize()) {
- LOG(LS_ERROR) << "Zero size frame.";
- return VPM_PARAMETER_ERROR;
- }
-
- int width = frame.width();
- int height = frame.height();
-
- ClearFrameStats(stats); // The histogram needs to be zeroed out.
- SetSubSampling(stats, width, height);
-
- const uint8_t* buffer = frame.buffer(kYPlane);
- // Compute histogram and sum of frame
- for (int i = 0; i < height; i += (1 << stats->subSamplHeight)) {
- int k = i * width;
- for (int j = 0; j < width; j += (1 << stats->subSamplWidth)) {
- stats->hist[buffer[k + j]]++;
- stats->sum += buffer[k + j];
- }
- }
-
- stats->num_pixels = (width * height) / ((1 << stats->subSamplWidth) *
- (1 << stats->subSamplHeight));
- assert(stats->num_pixels > 0);
-
- // Compute mean value of frame
- stats->mean = stats->sum / stats->num_pixels;
-
- return VPM_OK;
-}
-
-bool VideoProcessingModule::ValidFrameStats(const FrameStats& stats) {
- if (stats.num_pixels == 0) {
- LOG(LS_WARNING) << "Invalid frame stats.";
- return false;
- }
- return true;
-}
-
-void VideoProcessingModule::ClearFrameStats(FrameStats* stats) {
- stats->mean = 0;
- stats->sum = 0;
- stats->num_pixels = 0;
- stats->subSamplWidth = 0;
- stats->subSamplHeight = 0;
- memset(stats->hist, 0, sizeof(stats->hist));
-}
-
-int32_t VideoProcessingModule::Brighten(VideoFrame* frame, int delta) {
- return VideoProcessing::Brighten(frame, delta);
-}
-
-int32_t VideoProcessingModuleImpl::Deflickering(VideoFrame* frame,
- FrameStats* stats) {
- rtc::CritScope mutex(&mutex_);
- return deflickering_.ProcessFrame(frame, stats);
-}
-
-int32_t VideoProcessingModuleImpl::BrightnessDetection(
- const VideoFrame& frame,
- const FrameStats& stats) {
- rtc::CritScope mutex(&mutex_);
- return brightness_detection_.ProcessFrame(frame, stats);
-}
-
-
-void VideoProcessingModuleImpl::EnableTemporalDecimation(bool enable) {
- rtc::CritScope mutex(&mutex_);
- frame_pre_processor_.EnableTemporalDecimation(enable);
-}
-
-
-void VideoProcessingModuleImpl::SetInputFrameResampleMode(VideoFrameResampling
- resampling_mode) {
- rtc::CritScope cs(&mutex_);
- frame_pre_processor_.SetInputFrameResampleMode(resampling_mode);
-}
-
-int32_t VideoProcessingModuleImpl::SetTargetResolution(uint32_t width,
- uint32_t height,
- uint32_t frame_rate) {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.SetTargetResolution(width, height, frame_rate);
-}
-
-void VideoProcessingModuleImpl::SetTargetFramerate(int frame_rate) {
- rtc::CritScope cs(&mutex_);
- frame_pre_processor_.SetTargetFramerate(frame_rate);
-}
-
-uint32_t VideoProcessingModuleImpl::Decimatedframe_rate() {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.Decimatedframe_rate();
-}
-
-uint32_t VideoProcessingModuleImpl::DecimatedWidth() const {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.DecimatedWidth();
-}
-
-uint32_t VideoProcessingModuleImpl::DecimatedHeight() const {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.DecimatedHeight();
-}
-
-int32_t VideoProcessingModuleImpl::PreprocessFrame(
- const VideoFrame& frame,
- VideoFrame** processed_frame) {
- rtc::CritScope mutex(&mutex_);
- return frame_pre_processor_.PreprocessFrame(frame, processed_frame);
-}
-
-VideoContentMetrics* VideoProcessingModuleImpl::ContentMetrics() const {
- rtc::CritScope mutex(&mutex_);
- return frame_pre_processor_.ContentMetrics();
-}
-
-void VideoProcessingModuleImpl::EnableContentAnalysis(bool enable) {
- rtc::CritScope mutex(&mutex_);
- frame_pre_processor_.EnableContentAnalysis(enable);
-}
-
-} // namespace webrtc
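To make the deleted GetFrameStats() arithmetic concrete: for a CIF (352x288) input, SetSubSampling() selects factor 2, so both loops step by 1 << 2 = 4 and num_pixels = (352 * 288) / (4 * 4) = 6336. A sketch of the same subsampled histogram/mean pass over a bare luma buffer; LumaStats is hypothetical and assumes a tightly packed plane (stride == width):

    #include <stdint.h>
    #include <string.h>

    // Subsampled histogram, sum and mean of a Y plane, as in the deleted
    // GetFrameStats(). |sub_factor| is the power-of-two subsampling factor.
    void LumaStats(const uint8_t* y, int width, int height, int sub_factor,
                   uint32_t hist[256], uint32_t* mean) {
      memset(hist, 0, 256 * sizeof(uint32_t));
      uint32_t sum = 0;
      const int step = 1 << sub_factor;  // E.g. 4 for CIF and larger.
      for (int i = 0; i < height; i += step) {
        for (int j = 0; j < width; j += step) {
          const uint8_t px = y[i * width + j];
          ++hist[px];
          sum += px;
        }
      }
      const uint32_t num_pixels = (width * height) / (step * step);
      *mean = sum / num_pixels;
    }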
diff --git a/webrtc/modules/video_processing/main/source/video_processing_impl.h b/webrtc/modules/video_processing/main/source/video_processing_impl.h
deleted file mode 100644
index fed5197f49..0000000000
--- a/webrtc/modules/video_processing/main/source/video_processing_impl.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULE_VIDEO_PROCESSING_IMPL_H
-#define WEBRTC_MODULE_VIDEO_PROCESSING_IMPL_H
-
-#include "webrtc/base/criticalsection.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/brighten.h"
-#include "webrtc/modules/video_processing/main/source/brightness_detection.h"
-#include "webrtc/modules/video_processing/main/source/deflickering.h"
-#include "webrtc/modules/video_processing/main/source/frame_preprocessor.h"
-
-namespace webrtc {
-class CriticalSectionWrapper;
-
-class VideoProcessingModuleImpl : public VideoProcessingModule {
- public:
- VideoProcessingModuleImpl();
- ~VideoProcessingModuleImpl() override;
-
- void Reset() override;
-
- int32_t Deflickering(VideoFrame* frame, FrameStats* stats) override;
-
- int32_t BrightnessDetection(const VideoFrame& frame,
- const FrameStats& stats) override;
-
- // Frame pre-processor functions
-
- // Enable temporal decimation
- void EnableTemporalDecimation(bool enable) override;
-
- void SetInputFrameResampleMode(VideoFrameResampling resampling_mode) override;
-
- // Enable content analysis
- void EnableContentAnalysis(bool enable) override;
-
- // Set Target Resolution: frame rate and dimension
- int32_t SetTargetResolution(uint32_t width,
- uint32_t height,
- uint32_t frame_rate) override;
-
- void SetTargetFramerate(int frame_rate) override;
-
- // Get decimated values: frame rate/dimension
- uint32_t Decimatedframe_rate() override;
- uint32_t DecimatedWidth() const override;
- uint32_t DecimatedHeight() const override;
-
- // Preprocess:
- // Pre-process incoming frame: Sample when needed and compute content
- // metrics when enabled.
- // If no resampling takes place - processed_frame is set to NULL.
- int32_t PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame) override;
- VideoContentMetrics* ContentMetrics() const override;
-
- private:
- mutable rtc::CriticalSection mutex_;
- VPMDeflickering deflickering_ GUARDED_BY(mutex_);
- VPMBrightnessDetection brightness_detection_;
- VPMFramePreprocessor frame_pre_processor_;
-};
-
-} // namespace
-
-#endif
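Every public method of the deleted implementation above enters one rtc::CritScope on the same lock; that is the whole substance of the interface's promise that only a single function executes concurrently. A minimal sketch of the pattern, assuming only rtc::CriticalSection from webrtc/base; SingleLockExample is illustrative:

    #include "webrtc/base/criticalsection.h"

    // Serialize all public methods with a single lock, as the deleted
    // VideoProcessingModuleImpl did.
    class SingleLockExample {
     public:
      void DoWork() {
        rtc::CritScope cs(&mutex_);
        // At most one caller runs any method at a time.
      }

     private:
      mutable rtc::CriticalSection mutex_;
    };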
diff --git a/webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc b/webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc
deleted file mode 100644
index 4d0de3ac98..0000000000
--- a/webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-using namespace webrtc;
-
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(BrightnessDetection))
-{
- uint32_t frameNum = 0;
- int32_t brightnessWarning = 0;
- uint32_t warningCount = 0;
- rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- frameNum++;
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_GE(brightnessWarning = vpm_->BrightnessDetection(video_frame_,
- stats), 0);
- if (brightnessWarning != VideoProcessingModule::kNoWarning)
- {
- warningCount++;
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- // Expect few warnings
- float warningProportion = static_cast<float>(warningCount) / frameNum * 100;
- printf("\nWarning proportions:\n");
- printf("Stock foreman: %.1f %%\n", warningProportion);
- EXPECT_LT(warningProportion, 10);
-
- rewind(source_file_);
- frameNum = 0;
- warningCount = 0;
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_ &&
- frameNum < 300)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- frameNum++;
-
- uint8_t* frame = video_frame_.buffer(kYPlane);
- uint32_t yTmp = 0;
- for (int yIdx = 0; yIdx < width_ * height_; yIdx++)
- {
- yTmp = frame[yIdx] << 1;
- if (yTmp > 255)
- {
- yTmp = 255;
- }
- frame[yIdx] = static_cast<uint8_t>(yTmp);
- }
-
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_GE(brightnessWarning = vpm_->BrightnessDetection(video_frame_,
- stats), 0);
- EXPECT_NE(VideoProcessingModule::kDarkWarning, brightnessWarning);
- if (brightnessWarning == VideoProcessingModule::kBrightWarning)
- {
- warningCount++;
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- // Expect many brightness warnings
- warningProportion = static_cast<float>(warningCount) / frameNum * 100;
- printf("Bright foreman: %.1f %%\n", warningProportion);
- EXPECT_GT(warningProportion, 95);
-
- rewind(source_file_);
- frameNum = 0;
- warningCount = 0;
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_ && frameNum < 300)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- frameNum++;
-
- uint8_t* y_plane = video_frame_.buffer(kYPlane);
- int32_t yTmp = 0;
- for (int yIdx = 0; yIdx < width_ * height_; yIdx++)
- {
- yTmp = y_plane[yIdx] >> 1;
- y_plane[yIdx] = static_cast<uint8_t>(yTmp);
- }
-
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_GE(brightnessWarning = vpm_->BrightnessDetection(video_frame_,
- stats), 0);
- EXPECT_NE(VideoProcessingModule::kBrightWarning, brightnessWarning);
- if (brightnessWarning == VideoProcessingModule::kDarkWarning)
- {
- warningCount++;
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- // Expect many darkness warnings
- warningProportion = static_cast<float>(warningCount) / frameNum * 100;
- printf("Dark foreman: %.1f %%\n\n", warningProportion);
- EXPECT_GT(warningProportion, 90);
-}
diff --git a/webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc b/webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc
deleted file mode 100644
index 83d09ef486..0000000000
--- a/webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/system_wrappers/include/tick_util.h"
-#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-namespace webrtc {
-
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Deflickering))
-{
- enum { NumRuns = 30 };
- uint32_t frameNum = 0;
- const uint32_t frame_rate = 15;
-
- int64_t min_runtime = 0;
- int64_t avg_runtime = 0;
-
- // Close automatically opened Foreman.
- fclose(source_file_);
- const std::string input_file =
- webrtc::test::ResourcePath("deflicker_before_cif_short", "yuv");
- source_file_ = fopen(input_file.c_str(), "rb");
- ASSERT_TRUE(source_file_ != NULL) <<
- "Cannot read input file: " << input_file << "\n";
-
- const std::string output_file =
- webrtc::test::OutputPath() + "deflicker_output_cif_short.yuv";
- FILE* deflickerFile = fopen(output_file.c_str(), "wb");
- ASSERT_TRUE(deflickerFile != NULL) <<
- "Could not open output file: " << output_file << "\n";
-
- printf("\nRun time [us / frame]:\n");
- rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++)
- {
- TickTime t0;
- TickTime t1;
- TickInterval acc_ticks;
- uint32_t timeStamp = 1;
-
- frameNum = 0;
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_)
- {
- frameNum++;
- EXPECT_EQ(
- 0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- video_frame_.set_timestamp(timeStamp);
-
- t0 = TickTime::Now();
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_EQ(0, vpm_->Deflickering(&video_frame_, &stats));
- t1 = TickTime::Now();
- acc_ticks += (t1 - t0);
-
- if (run_idx == 0)
- {
- if (PrintVideoFrame(video_frame_, deflickerFile) < 0) {
- return;
- }
- }
- timeStamp += (90000 / frame_rate);
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
- if (acc_ticks.Microseconds() < min_runtime || run_idx == 0)
- {
- min_runtime = acc_ticks.Microseconds();
- }
- avg_runtime += acc_ticks.Microseconds();
-
- rewind(source_file_);
- }
- ASSERT_EQ(0, fclose(deflickerFile));
- // TODO(kjellander): Add verification of deflicker output file.
-
- printf("\nAverage run time = %d us / frame\n",
- static_cast<int>(avg_runtime / frameNum / NumRuns));
- printf("Min run time = %d us / frame\n\n",
- static_cast<int>(min_runtime / frameNum));
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/video_processing/main/source/spatial_resampler.cc b/webrtc/modules/video_processing/spatial_resampler.cc
index 9360e68b41..cdbe0efac1 100644
--- a/webrtc/modules/video_processing/main/source/spatial_resampler.cc
+++ b/webrtc/modules/video_processing/spatial_resampler.cc
@@ -8,8 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/spatial_resampler.h"
-
+#include "webrtc/modules/video_processing/spatial_resampler.h"
namespace webrtc {
@@ -21,12 +20,13 @@ VPMSimpleSpatialResampler::VPMSimpleSpatialResampler()
VPMSimpleSpatialResampler::~VPMSimpleSpatialResampler() {}
-
int32_t VPMSimpleSpatialResampler::SetTargetFrameSize(int32_t width,
int32_t height) {
- if (resampling_mode_ == kNoRescaling) return VPM_OK;
+ if (resampling_mode_ == kNoRescaling)
+ return VPM_OK;
- if (width < 1 || height < 1) return VPM_PARAMETER_ERROR;
+ if (width < 1 || height < 1)
+ return VPM_PARAMETER_ERROR;
target_width_ = width;
target_height_ = height;
@@ -48,11 +48,11 @@ void VPMSimpleSpatialResampler::Reset() {
int32_t VPMSimpleSpatialResampler::ResampleFrame(const VideoFrame& inFrame,
VideoFrame* outFrame) {
// Don't copy if frame remains as is.
- if (resampling_mode_ == kNoRescaling)
- return VPM_OK;
+ if (resampling_mode_ == kNoRescaling) {
+ return VPM_OK;
// Check if re-sampling is needed
- else if ((inFrame.width() == target_width_) &&
- (inFrame.height() == target_height_)) {
+ } else if ((inFrame.width() == target_width_) &&
+ (inFrame.height() == target_height_)) {
return VPM_OK;
}
@@ -60,8 +60,8 @@ int32_t VPMSimpleSpatialResampler::ResampleFrame(const VideoFrame& inFrame,
// TODO(mikhal/marpan): Should we allow for setting the filter mode in
// _scale.Set() with |resampling_mode_|?
int ret_val = 0;
- ret_val = scaler_.Set(inFrame.width(), inFrame.height(),
- target_width_, target_height_, kI420, kI420, kScaleBox);
+ ret_val = scaler_.Set(inFrame.width(), inFrame.height(), target_width_,
+ target_height_, kI420, kI420, kScaleBox);
if (ret_val < 0)
return ret_val;
@@ -86,10 +86,9 @@ int32_t VPMSimpleSpatialResampler::TargetWidth() {
return target_width_;
}
-bool VPMSimpleSpatialResampler::ApplyResample(int32_t width,
- int32_t height) {
+bool VPMSimpleSpatialResampler::ApplyResample(int32_t width, int32_t height) {
if ((width == target_width_ && height == target_height_) ||
- resampling_mode_ == kNoRescaling)
+ resampling_mode_ == kNoRescaling)
return false;
else
return true;
diff --git a/webrtc/modules/video_processing/main/source/spatial_resampler.h b/webrtc/modules/video_processing/spatial_resampler.h
index f965a40a83..51820e24e5 100644
--- a/webrtc/modules/video_processing/main/source/spatial_resampler.h
+++ b/webrtc/modules/video_processing/spatial_resampler.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_SPATIAL_RESAMPLER_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_SPATIAL_RESAMPLER_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_SPATIAL_RESAMPLER_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_SPATIAL_RESAMPLER_H_
#include "webrtc/typedefs.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
#include "webrtc/common_video/libyuv/include/scaler.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
@@ -23,10 +23,10 @@ namespace webrtc {
class VPMSpatialResampler {
public:
- virtual ~VPMSpatialResampler() {};
+ virtual ~VPMSpatialResampler() {}
virtual int32_t SetTargetFrameSize(int32_t width, int32_t height) = 0;
- virtual void SetInputFrameResampleMode(VideoFrameResampling
- resampling_mode) = 0;
+ virtual void SetInputFrameResampleMode(
+ VideoFrameResampling resampling_mode) = 0;
virtual void Reset() = 0;
virtual int32_t ResampleFrame(const VideoFrame& inFrame,
VideoFrame* outFrame) = 0;
@@ -49,13 +49,12 @@ class VPMSimpleSpatialResampler : public VPMSpatialResampler {
virtual bool ApplyResample(int32_t width, int32_t height);
private:
-
- VideoFrameResampling resampling_mode_;
- int32_t target_width_;
- int32_t target_height_;
- Scaler scaler_;
+ VideoFrameResampling resampling_mode_;
+ int32_t target_width_;
+ int32_t target_height_;
+ Scaler scaler_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_SPATIAL_RESAMPLER_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_SPATIAL_RESAMPLER_H_
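A short sketch of driving the resampler declared above directly. DownscaleToQvga is a hypothetical caller; the 320x240 target and kBox mode are arbitrary choices for illustration:

    #include "webrtc/modules/video_processing/spatial_resampler.h"

    // Configure the resampler once, then scale a frame to the target size.
    int32_t DownscaleToQvga(const webrtc::VideoFrame& in,
                            webrtc::VideoFrame* out) {
      webrtc::VPMSimpleSpatialResampler resampler;
      resampler.SetInputFrameResampleMode(webrtc::kBox);
      int32_t ret = resampler.SetTargetFrameSize(320, 240);
      if (ret != VPM_OK)
        return ret;
      // ResampleFrame() returns VPM_OK without copying when the mode is
      // kNoRescaling or the frame is already at the target size.
      return resampler.ResampleFrame(in, out);
    }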
diff --git a/webrtc/modules/video_processing/test/brightness_detection_test.cc b/webrtc/modules/video_processing/test/brightness_detection_test.cc
new file mode 100644
index 0000000000..669bb183e5
--- /dev/null
+++ b/webrtc/modules/video_processing/test/brightness_detection_test.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_BrightnessDetection DISABLED_BrightnessDetection
+#else
+#define MAYBE_BrightnessDetection BrightnessDetection
+#endif
+TEST_F(VideoProcessingTest, MAYBE_BrightnessDetection) {
+ uint32_t frameNum = 0;
+ int32_t brightnessWarning = 0;
+ uint32_t warningCount = 0;
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ if (brightnessWarning != VideoProcessing::kNoWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect few warnings
+ float warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("\nWarning proportions:\n");
+ printf("Stock foreman: %.1f %%\n", warningProportion);
+ EXPECT_LT(warningProportion, 10);
+
+ rewind(source_file_);
+ frameNum = 0;
+ warningCount = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_ &&
+ frameNum < 300) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+
+ uint8_t* frame = video_frame_.buffer(kYPlane);
+ uint32_t yTmp = 0;
+ for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
+ yTmp = frame[yIdx] << 1;
+ if (yTmp > 255) {
+ yTmp = 255;
+ }
+ frame[yIdx] = static_cast<uint8_t>(yTmp);
+ }
+
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ EXPECT_NE(VideoProcessing::kDarkWarning, brightnessWarning);
+ if (brightnessWarning == VideoProcessing::kBrightWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect many brightness warnings
+ warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("Bright foreman: %.1f %%\n", warningProportion);
+ EXPECT_GT(warningProportion, 95);
+
+ rewind(source_file_);
+ frameNum = 0;
+ warningCount = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_ &&
+ frameNum < 300) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+
+ uint8_t* y_plane = video_frame_.buffer(kYPlane);
+ int32_t yTmp = 0;
+ for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
+ yTmp = y_plane[yIdx] >> 1;
+ y_plane[yIdx] = static_cast<uint8_t>(yTmp);
+ }
+
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ EXPECT_NE(VideoProcessing::kBrightWarning, brightnessWarning);
+ if (brightnessWarning == VideoProcessing::kDarkWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect many darkness warnings
+ warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("Dark foreman: %.1f %%\n\n", warningProportion);
+ EXPECT_GT(warningProportion, 90);
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/main/test/unit_test/content_metrics_test.cc b/webrtc/modules/video_processing/test/content_metrics_test.cc
index d9c1309d9b..782f9cff59 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/content_metrics_test.cc
+++ b/webrtc/modules/video_processing/test/content_metrics_test.cc
@@ -9,28 +9,32 @@
*/
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
namespace webrtc {
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(ContentAnalysis)) {
- VPMContentAnalysis ca__c(false);
- VPMContentAnalysis ca__sse(true);
- VideoContentMetrics *_cM_c, *_cM_SSE;
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_ContentAnalysis) {
+#else
+TEST_F(VideoProcessingTest, ContentAnalysis) {
+#endif
+ VPMContentAnalysis ca__c(false);
+ VPMContentAnalysis ca__sse(true);
+ VideoContentMetrics* _cM_c;
+ VideoContentMetrics* _cM_SSE;
- ca__c.Initialize(width_,height_);
- ca__sse.Initialize(width_,height_);
+ ca__c.Initialize(width_, height_);
+ ca__sse.Initialize(width_, height_);
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- while (fread(video_buffer.get(), 1, frame_length_, source_file_)
- == frame_length_) {
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
// Using ConvertToI420 to add stride to the image.
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- _cM_c = ca__c.ComputeContentMetrics(video_frame_);
+ _cM_c = ca__c.ComputeContentMetrics(video_frame_);
_cM_SSE = ca__sse.ComputeContentMetrics(video_frame_);
ASSERT_EQ(_cM_c->spatial_pred_err, _cM_SSE->spatial_pred_err);
diff --git a/webrtc/modules/video_processing/main/test/unit_test/createTable.m b/webrtc/modules/video_processing/test/createTable.m
index 2c7fb522f6..fe8777ee71 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/createTable.m
+++ b/webrtc/modules/video_processing/test/createTable.m
@@ -31,7 +31,7 @@ A=(1-B)/r0;
f0=A*x0.^2+B*x0; % compander function in zone 1
% equation system for finding second zone parameters
-M=[r0^3 r0^2 r0 1;
+M=[r0^3 r0^2 r0 1;
3*r0^2 2*r0 1 0;
3*r1^2 2*r1 1 0;
r1^3 r1^2 r1 1];
@@ -173,7 +173,7 @@ for k=1:size(y,3)
end
end
end
-
+
fprintf('\nWriting modified test file...')
writeYUV420file('../out/Debug/foremanColorEnhanced.yuv',y,unew,vnew);
fprintf(' done\n');
diff --git a/webrtc/modules/video_processing/test/deflickering_test.cc b/webrtc/modules/video_processing/test/deflickering_test.cc
new file mode 100644
index 0000000000..5410015b06
--- /dev/null
+++ b/webrtc/modules/video_processing/test/deflickering_test.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_Deflickering) {
+#else
+TEST_F(VideoProcessingTest, Deflickering) {
+#endif
+ enum { NumRuns = 30 };
+ uint32_t frameNum = 0;
+ const uint32_t frame_rate = 15;
+
+ int64_t min_runtime = 0;
+ int64_t avg_runtime = 0;
+
+ // Close automatically opened Foreman.
+ fclose(source_file_);
+ const std::string input_file =
+ webrtc::test::ResourcePath("deflicker_before_cif_short", "yuv");
+ source_file_ = fopen(input_file.c_str(), "rb");
+ ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file: " << input_file
+ << "\n";
+
+ const std::string output_file =
+ webrtc::test::OutputPath() + "deflicker_output_cif_short.yuv";
+ FILE* deflickerFile = fopen(output_file.c_str(), "wb");
+ ASSERT_TRUE(deflickerFile != NULL)
+ << "Could not open output file: " << output_file << "\n";
+
+ printf("\nRun time [us / frame]:\n");
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++) {
+ TickTime t0;
+ TickTime t1;
+ TickInterval acc_ticks;
+ uint32_t timeStamp = 1;
+
+ frameNum = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ frameNum++;
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
+ height_, 0, kVideoRotation_0, &video_frame_));
+ video_frame_.set_timestamp(timeStamp);
+
+ t0 = TickTime::Now();
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
+ t1 = TickTime::Now();
+ acc_ticks += (t1 - t0);
+
+ if (run_idx == 0) {
+ if (PrintVideoFrame(video_frame_, deflickerFile) < 0) {
+ return;
+ }
+ }
+ timeStamp += (90000 / frame_rate);
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
+ if (acc_ticks.Microseconds() < min_runtime || run_idx == 0) {
+ min_runtime = acc_ticks.Microseconds();
+ }
+ avg_runtime += acc_ticks.Microseconds();
+
+ rewind(source_file_);
+ }
+ ASSERT_EQ(0, fclose(deflickerFile));
+ // TODO(kjellander): Add verification of deflicker output file.
+
+ printf("\nAverage run time = %d us / frame\n",
+ static_cast<int>(avg_runtime / frameNum / NumRuns));
+ printf("Min run time = %d us / frame\n\n",
+ static_cast<int>(min_runtime / frameNum));
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/test/denoiser_test.cc b/webrtc/modules/video_processing/test/denoiser_test.cc
new file mode 100644
index 0000000000..551a77617d
--- /dev/null
+++ b/webrtc/modules/video_processing/test/denoiser_test.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+#include "webrtc/modules/video_processing/video_denoiser.h"
+
+namespace webrtc {
+
+TEST_F(VideoProcessingTest, CopyMem) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t src[16 * 16], dst[16 * 16];
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ src[i * 16 + j] = i * 16 + j;
+ }
+ }
+
+ memset(dst, 0, 8 * 8);
+ df_c->CopyMem8x8(src, 8, dst, 8);
+ EXPECT_EQ(0, memcmp(src, dst, 8 * 8));
+
+ memset(dst, 0, 16 * 16);
+ df_c->CopyMem16x16(src, 16, dst, 16);
+ EXPECT_EQ(0, memcmp(src, dst, 16 * 16));
+
+ memset(dst, 0, 8 * 8);
+  df_sse_neon->CopyMem8x8(src, 8, dst, 8);
+ EXPECT_EQ(0, memcmp(src, dst, 8 * 8));
+
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->CopyMem16x16(src, 16, dst, 16);
+ EXPECT_EQ(0, memcmp(src, dst, 16 * 16));
+}
+
+TEST_F(VideoProcessingTest, Variance) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t src[16 * 16], dst[16 * 16];
+ uint32_t sum = 0, sse = 0, var;
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ src[i * 16 + j] = i * 16 + j;
+ }
+ }
+ // Compute the 16x8 variance of the 16x16 block.
+ for (int i = 0; i < 8; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ sum += (i * 32 + j);
+ sse += (i * 32 + j) * (i * 32 + j);
+ }
+ }
+ var = sse - ((sum * sum) >> 7);
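+  // Integer form of Var(X) = E[X^2] - E[X]^2: the 16x8 block holds 128
+  // pixels, so var = sse - sum * sum / 128 = sse - ((sum * sum) >> 7). The
+  // reference sums step rows by 32, twice the stride of 16, which suggests
+  // Variance16x8 samples every other row of a 16x16 region.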
+ memset(dst, 0, 16 * 16);
+ EXPECT_EQ(var, df_c->Variance16x8(src, 16, dst, 16, &sse));
+ EXPECT_EQ(var, df_sse_neon->Variance16x8(src, 16, dst, 16, &sse));
+}
+
+TEST_F(VideoProcessingTest, MbDenoise) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t running_src[16 * 16], src[16 * 16], dst[16 * 16], dst_ref[16 * 16];
+
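+  // The four cases below trace MbDenoise()'s piecewise behavior as encoded
+  // in the reference buffers: a per-pixel |diff| within 3 + shift_inc1 keeps
+  // the running average; moderate diffs pull the output toward it by a fixed
+  // step (2 at |diff| = 5, 6 at |diff| = 8); beyond 15 the filter gives up
+  // and returns COPY_BLOCK so the caller copies the source block unchanged.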
+ // Test case: |diff| <= |3 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 2;
+ dst_ref[i * 16 + j] = running_src[i * 16 + j];
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| >= |4 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 5;
+ dst_ref[i * 16 + j] = src[i * 16 + j] - 2;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| >= 8
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 8;
+ dst_ref[i * 16 + j] = src[i * 16 + j] - 6;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| > 15
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 16;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ DenoiserDecision decision =
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+ decision = df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+}
+
+TEST_F(VideoProcessingTest, Denoiser) {
+ // Create pure C denoiser.
+ VideoDenoiser denoiser_c(false);
+ // Create SSE or NEON denoiser.
+ VideoDenoiser denoiser_sse_neon(true);
+ VideoFrame denoised_frame_c;
+ VideoFrame denoised_frame_sse_neon;
+
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ // Using ConvertToI420 to add stride to the image.
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+
+ denoiser_c.DenoiseFrame(video_frame_, &denoised_frame_c);
+ denoiser_sse_neon.DenoiseFrame(video_frame_, &denoised_frame_sse_neon);
+
+ // Denoising results should be the same for C and SSE/NEON denoiser.
+ ASSERT_EQ(true, denoised_frame_c.EqualsFrame(denoised_frame_sse_neon));
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/main/test/unit_test/readYUV420file.m b/webrtc/modules/video_processing/test/readYUV420file.m
index 03013efd3a..f409820283 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/readYUV420file.m
+++ b/webrtc/modules/video_processing/test/readYUV420file.m
@@ -10,7 +10,7 @@ end
nPx=width*height;
% nPx bytes luminance, nPx/4 bytes U, nPx/4 bytes V
-frameSizeBytes = nPx*1.5;
+frameSizeBytes = nPx*1.5;
% calculate number of frames
fseek(fid,0,'eof'); % move to end of file
@@ -27,19 +27,19 @@ V=uint8(zeros(height/2,width/2,numFrames));
[X,nBytes]=fread(fid, frameSizeBytes, 'uchar');
for k=1:numFrames
-
+
% Store luminance
Y(:,:,k)=uint8(reshape(X(1:nPx), width, height).');
-
+
% Store U channel
U(:,:,k)=uint8(reshape(X(nPx + (1:nPx/4)), width/2, height/2).');
% Store V channel
V(:,:,k)=uint8(reshape(X(nPx + nPx/4 + (1:nPx/4)), width/2, height/2).');
-
+
% Read next frame
[X,nBytes]=fread(fid, frameSizeBytes, 'uchar');
end
-
+
fclose(fid);
diff --git a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc b/webrtc/modules/video_processing/test/video_processing_unittest.cc
index 11ccc4891b..2fd8fb6673 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
+++ b/webrtc/modules/video_processing/test/video_processing_unittest.cc
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+
+#include <gflags/gflags.h>
#include <string>
-#include <gflags/gflags.h>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -30,8 +30,8 @@ DEFINE_bool(gen_files, false, "Output files for visual inspection.");
static void PreprocessFrameAndVerify(const VideoFrame& source,
int target_width,
int target_height,
- VideoProcessingModule* vpm,
- VideoFrame** out_frame);
+ VideoProcessing* vpm,
+ const VideoFrame* out_frame);
static void CropFrame(const uint8_t* source_data,
int source_width,
int source_height,
@@ -49,14 +49,14 @@ static void TestSize(const VideoFrame& source_frame,
int target_width,
int target_height,
double expected_psnr,
- VideoProcessingModule* vpm);
+ VideoProcessing* vpm);
static bool CompareFrames(const webrtc::VideoFrame& frame1,
const webrtc::VideoFrame& frame2);
static void WriteProcessedFrameForVisualInspection(const VideoFrame& source,
const VideoFrame& processed);
-VideoProcessingModuleTest::VideoProcessingModuleTest()
- : vpm_(NULL),
+VideoProcessingTest::VideoProcessingTest()
+ : vp_(NULL),
source_file_(NULL),
width_(352),
half_width_((width_ + 1) / 2),
@@ -65,155 +65,179 @@ VideoProcessingModuleTest::VideoProcessingModuleTest()
size_uv_(half_width_ * ((height_ + 1) / 2)),
frame_length_(CalcBufferSize(kI420, width_, height_)) {}
-void VideoProcessingModuleTest::SetUp() {
- vpm_ = VideoProcessingModule::Create();
- ASSERT_TRUE(vpm_ != NULL);
+void VideoProcessingTest::SetUp() {
+ vp_ = VideoProcessing::Create();
+ ASSERT_TRUE(vp_ != NULL);
ASSERT_EQ(0, video_frame_.CreateEmptyFrame(width_, height_, width_,
- half_width_, half_width_));
+ half_width_, half_width_));
// Clear video frame so DrMemory/Valgrind will allow reads of the buffer.
memset(video_frame_.buffer(kYPlane), 0, video_frame_.allocated_size(kYPlane));
memset(video_frame_.buffer(kUPlane), 0, video_frame_.allocated_size(kUPlane));
memset(video_frame_.buffer(kVPlane), 0, video_frame_.allocated_size(kVPlane));
const std::string video_file =
webrtc::test::ResourcePath("foreman_cif", "yuv");
- source_file_ = fopen(video_file.c_str(),"rb");
- ASSERT_TRUE(source_file_ != NULL) <<
- "Cannot read source file: " + video_file + "\n";
+ source_file_ = fopen(video_file.c_str(), "rb");
+ ASSERT_TRUE(source_file_ != NULL)
+ << "Cannot read source file: " + video_file + "\n";
}
-void VideoProcessingModuleTest::TearDown() {
- if (source_file_ != NULL) {
+void VideoProcessingTest::TearDown() {
+ if (source_file_ != NULL) {
ASSERT_EQ(0, fclose(source_file_));
}
source_file_ = NULL;
-
- if (vpm_ != NULL) {
- VideoProcessingModule::Destroy(vpm_);
- }
- vpm_ = NULL;
+ delete vp_;
+ vp_ = NULL;
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(HandleNullBuffer)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_HandleNullBuffer) {
+#else
+TEST_F(VideoProcessingTest, HandleNullBuffer) {
+#endif
// TODO(mikhal/stefan): Do we need this one?
- VideoProcessingModule::FrameStats stats;
+ VideoProcessing::FrameStats stats;
// Video frame with unallocated buffer.
VideoFrame videoFrame;
- EXPECT_EQ(-3, vpm_->GetFrameStats(&stats, videoFrame));
+ vp_->GetFrameStats(videoFrame, &stats);
+ EXPECT_EQ(stats.num_pixels, 0u);
- EXPECT_EQ(-1, vpm_->Deflickering(&videoFrame, &stats));
+ EXPECT_EQ(-1, vp_->Deflickering(&videoFrame, &stats));
- EXPECT_EQ(-3, vpm_->BrightnessDetection(videoFrame, stats));
+ EXPECT_EQ(-3, vp_->BrightnessDetection(videoFrame, stats));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(HandleBadStats)) {
- VideoProcessingModule::FrameStats stats;
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_HandleBadStats) {
+#else
+TEST_F(VideoProcessingTest, HandleBadStats) {
+#endif
+ VideoProcessing::FrameStats stats;
+ vp_->ClearFrameStats(&stats);
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- EXPECT_EQ(-1, vpm_->Deflickering(&video_frame_, &stats));
+ EXPECT_EQ(-1, vp_->Deflickering(&video_frame_, &stats));
- EXPECT_EQ(-3, vpm_->BrightnessDetection(video_frame_, stats));
+ EXPECT_EQ(-3, vp_->BrightnessDetection(video_frame_, stats));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(IdenticalResultsAfterReset)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_IdenticalResultsAfterReset) {
+#else
+TEST_F(VideoProcessingTest, IdenticalResultsAfterReset) {
+#endif
VideoFrame video_frame2;
- VideoProcessingModule::FrameStats stats;
+ VideoProcessing::FrameStats stats;
// Only testing non-static functions here.
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
ASSERT_EQ(0, video_frame2.CopyFrame(video_frame_));
- ASSERT_EQ(0, vpm_->Deflickering(&video_frame_, &stats));
- vpm_->Reset();
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
+
// Retrieve frame stats again in case Deflickering() has zeroed them.
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame2));
- ASSERT_EQ(0, vpm_->Deflickering(&video_frame2, &stats));
+ vp_->GetFrameStats(video_frame2, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame2, &stats));
EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
video_frame2.CopyFrame(video_frame_);
- ASSERT_EQ(0, vpm_->BrightnessDetection(video_frame_, stats));
- vpm_->Reset();
- ASSERT_EQ(0, vpm_->BrightnessDetection(video_frame2, stats));
+ ASSERT_EQ(0, vp_->BrightnessDetection(video_frame_, stats));
+
+ ASSERT_EQ(0, vp_->BrightnessDetection(video_frame2, stats));
EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(FrameStats)) {
- VideoProcessingModule::FrameStats stats;
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_FrameStats) {
+#else
+TEST_F(VideoProcessingTest, FrameStats) {
+#endif
+ VideoProcessing::FrameStats stats;
+ vp_->ClearFrameStats(&stats);
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- EXPECT_FALSE(vpm_->ValidFrameStats(stats));
- EXPECT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- EXPECT_TRUE(vpm_->ValidFrameStats(stats));
+ EXPECT_FALSE(vp_->ValidFrameStats(stats));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ EXPECT_TRUE(vp_->ValidFrameStats(stats));
printf("\nFrameStats\n");
- printf("mean: %u\nnum_pixels: %u\nsubSamplWidth: "
- "%u\nsumSamplHeight: %u\nsum: %u\n\n",
+ printf("mean: %u\nnum_pixels: %u\nsubSamplFactor: %u\nsum: %u\n\n",
static_cast<unsigned int>(stats.mean),
static_cast<unsigned int>(stats.num_pixels),
- static_cast<unsigned int>(stats.subSamplHeight),
- static_cast<unsigned int>(stats.subSamplWidth),
+ static_cast<unsigned int>(stats.sub_sampling_factor),
static_cast<unsigned int>(stats.sum));
- vpm_->ClearFrameStats(&stats);
- EXPECT_FALSE(vpm_->ValidFrameStats(stats));
+ vp_->ClearFrameStats(&stats);
+ EXPECT_FALSE(vp_->ValidFrameStats(stats));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(PreprocessorLogic)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_PreprocessorLogic) {
+#else
+TEST_F(VideoProcessingTest, PreprocessorLogic) {
+#endif
// Disable temporal sampling (frame dropping).
- vpm_->EnableTemporalDecimation(false);
+ vp_->EnableTemporalDecimation(false);
int resolution = 100;
- EXPECT_EQ(VPM_OK, vpm_->SetTargetResolution(resolution, resolution, 15));
- EXPECT_EQ(VPM_OK, vpm_->SetTargetResolution(resolution, resolution, 30));
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 15));
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 30));
// Disable spatial sampling.
- vpm_->SetInputFrameResampleMode(kNoRescaling);
- EXPECT_EQ(VPM_OK, vpm_->SetTargetResolution(resolution, resolution, 30));
+ vp_->SetInputFrameResampleMode(kNoRescaling);
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 30));
-  VideoFrame* out_frame = NULL;
+  const VideoFrame* out_frame = NULL;
// Set rescaling => output frame != NULL.
- vpm_->SetInputFrameResampleMode(kFastRescaling);
- PreprocessFrameAndVerify(video_frame_, resolution, resolution, vpm_,
- &out_frame);
+ vp_->SetInputFrameResampleMode(kFastRescaling);
+ PreprocessFrameAndVerify(video_frame_, resolution, resolution, vp_,
+                           &out_frame);
-  // No rescaling=> output frame = NULL.
+  // No rescaling => the original frame is returned.
- vpm_->SetInputFrameResampleMode(kNoRescaling);
- EXPECT_EQ(VPM_OK, vpm_->PreprocessFrame(video_frame_, &out_frame));
- EXPECT_TRUE(out_frame == NULL);
+ vp_->SetInputFrameResampleMode(kNoRescaling);
+ EXPECT_TRUE(vp_->PreprocessFrame(video_frame_) != nullptr);
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Resampler)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_Resampler) {
+#else
+TEST_F(VideoProcessingTest, Resampler) {
+#endif
enum { NumRuns = 1 };
int64_t min_runtime = 0;
int64_t total_runtime = 0;
rewind(source_file_);
- ASSERT_TRUE(source_file_ != NULL) <<
- "Cannot read input file \n";
+ ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file \n";
// CA not needed here
- vpm_->EnableContentAnalysis(false);
+ vp_->EnableContentAnalysis(false);
// no temporal decimation
- vpm_->EnableTemporalDecimation(false);
+ vp_->EnableTemporalDecimation(false);
// Reading test frame
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
// Using ConvertToI420 to add stride to the image.
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
@@ -231,43 +255,43 @@ TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Resampler)) {
// Test scaling to different sizes: source is of |width|/|height| = 352/288.
// Pure scaling:
- TestSize(video_frame_, video_frame_, width_ / 4, height_ / 4, 25.2, vpm_);
- TestSize(video_frame_, video_frame_, width_ / 2, height_ / 2, 28.1, vpm_);
+ TestSize(video_frame_, video_frame_, width_ / 4, height_ / 4, 25.2, vp_);
+ TestSize(video_frame_, video_frame_, width_ / 2, height_ / 2, 28.1, vp_);
// No resampling:
- TestSize(video_frame_, video_frame_, width_, height_, -1, vpm_);
- TestSize(video_frame_, video_frame_, 2 * width_, 2 * height_, 32.2, vpm_);
+ TestSize(video_frame_, video_frame_, width_, height_, -1, vp_);
+ TestSize(video_frame_, video_frame_, 2 * width_, 2 * height_, 32.2, vp_);
// Scaling and cropping. The cropped source frame is the largest center
// aligned region that can be used from the source while preserving aspect
// ratio.
CropFrame(video_buffer.get(), width_, height_, 0, 56, 352, 176,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 100, 50, 24.0, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 100, 50, 24.0, vp_);
CropFrame(video_buffer.get(), width_, height_, 0, 30, 352, 225,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 400, 256, 31.3, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 400, 256, 31.3, vp_);
CropFrame(video_buffer.get(), width_, height_, 68, 0, 216, 288,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 480, 640, 32.15, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 480, 640, 32.15, vp_);
CropFrame(video_buffer.get(), width_, height_, 0, 12, 352, 264,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 960, 720, 32.2, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 960, 720, 32.2, vp_);
CropFrame(video_buffer.get(), width_, height_, 0, 44, 352, 198,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 1280, 720, 32.15, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 1280, 720, 32.15, vp_);
// Upsampling to odd size.
CropFrame(video_buffer.get(), width_, height_, 0, 26, 352, 233,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 501, 333, 32.05, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 501, 333, 32.05, vp_);
// Downsample to odd size.
CropFrame(video_buffer.get(), width_, height_, 0, 34, 352, 219,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 281, 175, 29.3, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 281, 175, 29.3, vp_);
// Stop timer.
const int64_t runtime = (TickTime::Now() - time_start).Microseconds();
@@ -279,30 +303,30 @@ TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Resampler)) {
printf("\nAverage run time = %d us / frame\n",
static_cast<int>(total_runtime));
- printf("Min run time = %d us / frame\n\n",
- static_cast<int>(min_runtime));
+ printf("Min run time = %d us / frame\n\n", static_cast<int>(min_runtime));
}
void PreprocessFrameAndVerify(const VideoFrame& source,
int target_width,
int target_height,
- VideoProcessingModule* vpm,
- VideoFrame** out_frame) {
+ VideoProcessing* vpm,
+                              const VideoFrame** out_frame) {
ASSERT_EQ(VPM_OK, vpm->SetTargetResolution(target_width, target_height, 30));
- ASSERT_EQ(VPM_OK, vpm->PreprocessFrame(source, out_frame));
+  *out_frame = vpm->PreprocessFrame(source);
+  EXPECT_TRUE(*out_frame != nullptr);
- // If no resizing is needed, expect NULL.
+ // If no resizing is needed, expect the original frame.
if (target_width == source.width() && target_height == source.height()) {
- EXPECT_EQ(NULL, *out_frame);
+    EXPECT_EQ(&source, *out_frame);
return;
}
// Verify the resampled frame.
   EXPECT_TRUE(*out_frame != NULL);
   EXPECT_EQ(source.render_time_ms(), (*out_frame)->render_time_ms());
   EXPECT_EQ(source.timestamp(), (*out_frame)->timestamp());
   EXPECT_EQ(target_width, (*out_frame)->width());
   EXPECT_EQ(target_height, (*out_frame)->height());
}
void CropFrame(const uint8_t* source_data,
@@ -326,12 +350,12 @@ void TestSize(const VideoFrame& source_frame,
int target_width,
int target_height,
double expected_psnr,
- VideoProcessingModule* vpm) {
+ VideoProcessing* vpm) {
// Resample source_frame to out_frame.
-  VideoFrame* out_frame = NULL;
+  const VideoFrame* out_frame = NULL;
vpm->SetInputFrameResampleMode(kBox);
PreprocessFrameAndVerify(source_frame, target_width, target_height, vpm,
                            &out_frame);
if (out_frame == NULL)
return;
WriteProcessedFrameForVisualInspection(source_frame, *out_frame);
@@ -340,21 +364,22 @@ void TestSize(const VideoFrame& source_frame,
VideoFrame resampled_source_frame;
resampled_source_frame.CopyFrame(*out_frame);
PreprocessFrameAndVerify(resampled_source_frame, cropped_source_frame.width(),
                            cropped_source_frame.height(), vpm, &out_frame);
WriteProcessedFrameForVisualInspection(resampled_source_frame, *out_frame);
// Compute PSNR against the cropped source frame and check expectation.
double psnr = I420PSNR(&cropped_source_frame, out_frame);
EXPECT_GT(psnr, expected_psnr);
- printf("PSNR: %f. PSNR is between source of size %d %d, and a modified "
- "source which is scaled down/up to: %d %d, and back to source size \n",
- psnr, source_frame.width(), source_frame.height(),
- target_width, target_height);
+ printf(
+ "PSNR: %f. PSNR is between source of size %d %d, and a modified "
+ "source which is scaled down/up to: %d %d, and back to source size \n",
+ psnr, source_frame.width(), source_frame.height(), target_width,
+ target_height);
}
bool CompareFrames(const webrtc::VideoFrame& frame1,
const webrtc::VideoFrame& frame2) {
- for (int plane = 0; plane < webrtc::kNumOfPlanes; plane ++) {
+ for (int plane = 0; plane < webrtc::kNumOfPlanes; plane++) {
webrtc::PlaneType plane_type = static_cast<webrtc::PlaneType>(plane);
int allocated_size1 = frame1.allocated_size(plane_type);
int allocated_size2 = frame2.allocated_size(plane_type);
diff --git a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h b/webrtc/modules/video_processing/test/video_processing_unittest.h
index 4a4fda41e6..3433c6ca86 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h
+++ b/webrtc/modules/video_processing/test/video_processing_unittest.h
@@ -8,19 +8,21 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_TEST_UNIT_TEST_VIDEO_PROCESSING_UNITTEST_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_TEST_UNIT_TEST_VIDEO_PROCESSING_UNITTEST_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
+
+#include <string>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
-class VideoProcessingModuleTest : public ::testing::Test {
+class VideoProcessingTest : public ::testing::Test {
protected:
- VideoProcessingModuleTest();
+ VideoProcessingTest();
virtual void SetUp();
virtual void TearDown();
static void SetUpTestCase() {
@@ -28,10 +30,8 @@ class VideoProcessingModuleTest : public ::testing::Test {
std::string trace_file = webrtc::test::OutputPath() + "VPMTrace.txt";
ASSERT_EQ(0, Trace::SetTraceFile(trace_file.c_str()));
}
- static void TearDownTestCase() {
- Trace::ReturnTrace();
- }
- VideoProcessingModule* vpm_;
+ static void TearDownTestCase() { Trace::ReturnTrace(); }
+ VideoProcessing* vp_;
FILE* source_file_;
VideoFrame video_frame_;
const int width_;
@@ -44,4 +44,4 @@ class VideoProcessingModuleTest : public ::testing::Test {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_TEST_UNIT_TEST_VIDEO_PROCESSING_UNITTEST_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
diff --git a/webrtc/modules/video_processing/main/test/unit_test/writeYUV420file.m b/webrtc/modules/video_processing/test/writeYUV420file.m
index 69a8808338..359445009b 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/writeYUV420file.m
+++ b/webrtc/modules/video_processing/test/writeYUV420file.m
@@ -11,10 +11,10 @@ numFrames=size(Y,3);
for k=1:numFrames
% Write luminance
fwrite(fid,uint8(Y(:,:,k).'), 'uchar');
-
+
% Write U channel
fwrite(fid,uint8(U(:,:,k).'), 'uchar');
-
+
% Write V channel
fwrite(fid,uint8(V(:,:,k).'), 'uchar');
end
diff --git a/webrtc/modules/video_processing/util/denoiser_filter.cc b/webrtc/modules/video_processing/util/denoiser_filter.cc
new file mode 100644
index 0000000000..fbc2435cb5
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter_c.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter_neon.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter_sse2.h"
+#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
+
+namespace webrtc {
+
+const int kMotionMagnitudeThreshold = 8 * 3;
+const int kSumDiffThreshold = 16 * 16 * 2;
+const int kSumDiffThresholdHigh = 600;
+
+rtc::scoped_ptr<DenoiserFilter> DenoiserFilter::Create(
+ bool runtime_cpu_detection) {
+ rtc::scoped_ptr<DenoiserFilter> filter;
+
+ if (runtime_cpu_detection) {
+// If we know the minimum architecture at compile time, avoid CPU detection.
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ // x86 CPU detection required.
+ if (WebRtc_GetCPUInfo(kSSE2)) {
+ filter.reset(new DenoiserFilterSSE2());
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+#elif defined(WEBRTC_DETECT_NEON)
+ if (WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) {
+ filter.reset(new DenoiserFilterNEON());
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+#else
+ filter.reset(new DenoiserFilterC());
+#endif
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+
+ RTC_DCHECK(filter.get() != nullptr);
+ return filter;
+}
+
+} // namespace webrtc
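
A minimal usage sketch of the factory above (illustrative only; the wrapper
function and buffers are hypothetical, and only the DenoiserFilter interface
from this change is assumed):

    #include "webrtc/modules/video_processing/util/denoiser_filter.h"

    namespace webrtc {

    // Copy one 16x16 block with whichever implementation Create() selects
    // for this CPU at runtime.
    void CopyBlockExample(const uint8_t* src, uint8_t* dst, int stride) {
      rtc::scoped_ptr<DenoiserFilter> filter = DenoiserFilter::Create(true);
      filter->CopyMem16x16(src, stride, dst, stride);
    }

    }  // namespace webrtc
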
diff --git a/webrtc/modules/video_processing/util/denoiser_filter.h b/webrtc/modules/video_processing/util/denoiser_filter.h
new file mode 100644
index 0000000000..5d5a61c59c
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
+
+namespace webrtc {
+
+extern const int kMotionMagnitudeThreshold;
+extern const int kSumDiffThreshold;
+extern const int kSumDiffThresholdHigh;
+
+enum DenoiserDecision { COPY_BLOCK, FILTER_BLOCK };
+struct DenoiseMetrics {
+ uint32_t var;
+ uint32_t sad;
+ uint8_t denoise;
+ bool is_skin;
+};
+
+class DenoiserFilter {
+ public:
+ static rtc::scoped_ptr<DenoiserFilter> Create(bool runtime_cpu_detection);
+
+ virtual ~DenoiserFilter() {}
+
+ virtual void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) = 0;
+ virtual void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) = 0;
+ virtual uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+                                uint32_t* sse) = 0;
+ virtual DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
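
To make the stride and offset arithmetic concrete, a hedged sketch of a
per-macroblock driver for this interface follows; DenoiseYPlane and its
parameters are hypothetical, but the MbDenoise() and CopyMem16x16() calls
match the signatures declared above:

    #include "webrtc/modules/video_processing/util/denoiser_filter.h"

    namespace webrtc {

    void DenoiseYPlane(DenoiserFilter* filter,
                       const uint8_t* y_src,
                       uint8_t* mc_running_avg_y,
                       uint8_t* running_avg_y,
                       int stride_y,
                       int mb_rows,
                       int mb_cols) {
      for (int mb_row = 0; mb_row < mb_rows; ++mb_row) {
        for (int mb_col = 0; mb_col < mb_cols; ++mb_col) {
          // Each macroblock covers 16x16 luma pixels.
          const int offset = (mb_row << 4) * stride_y + (mb_col << 4);
          DenoiserDecision decision = filter->MbDenoise(
              mc_running_avg_y + offset, stride_y, running_avg_y + offset,
              stride_y, y_src + offset, stride_y, 0, 0);
          // Blocks the filter refuses to denoise pass through unchanged.
          if (decision == COPY_BLOCK) {
            filter->CopyMem16x16(y_src + offset, stride_y,
                                 running_avg_y + offset, stride_y);
          }
        }
      }
    }

    }  // namespace webrtc
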
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_c.cc b/webrtc/modules/video_processing/util/denoiser_filter_c.cc
new file mode 100644
index 0000000000..6323980e18
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_c.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "webrtc/modules/video_processing/util/denoiser_filter_c.h"
+
+namespace webrtc {
+
+void DenoiserFilterC::CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 16; i++) {
+ memcpy(dst, src, 16);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void DenoiserFilterC::CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 8; i++) {
+ memcpy(dst, src, 8);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+uint32_t DenoiserFilterC::Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ uint32_t* sse) {
+ int sum = 0;
+ *sse = 0;
+ a_stride <<= 1;
+ b_stride <<= 1;
+
+ for (int i = 0; i < 8; i++) {
+ for (int j = 0; j < 16; j++) {
+ const int diff = a[j] - b[j];
+ sum += diff;
+ *sse += diff * diff;
+ }
+
+ a += a_stride;
+ b += b_stride;
+ }
+ return *sse - ((static_cast<int64_t>(sum) * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterC::MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+ int sum_diff_thresh = 0;
+ int sum_diff = 0;
+ int adj_val[3] = {3, 4, 6};
+ int shift_inc1 = 0;
+ int shift_inc2 = 1;
+ int col_sum[16] = {0};
+ if (motion_magnitude <= kMotionMagnitudeThreshold) {
+ if (increase_denoising) {
+ shift_inc1 = 1;
+ shift_inc2 = 2;
+ }
+ adj_val[0] += shift_inc2;
+ adj_val[1] += shift_inc2;
+ adj_val[2] += shift_inc2;
+ }
+
+ for (int r = 0; r < 16; ++r) {
+ for (int c = 0; c < 16; ++c) {
+ int diff = 0;
+ int adjustment = 0;
+ int absdiff = 0;
+
+ diff = mc_running_avg_y[c] - sig[c];
+ absdiff = abs(diff);
+
+      // When |diff| <= |3 + shift_inc1|, use the pixel value from the
+      // last denoised frame.
+ if (absdiff <= 3 + shift_inc1) {
+ running_avg_y[c] = mc_running_avg_y[c];
+ col_sum[c] += diff;
+ } else {
+ if (absdiff >= 4 + shift_inc1 && absdiff <= 7)
+ adjustment = adj_val[0];
+ else if (absdiff >= 8 && absdiff <= 15)
+ adjustment = adj_val[1];
+ else
+ adjustment = adj_val[2];
+
+ if (diff > 0) {
+ if ((sig[c] + adjustment) > 255)
+ running_avg_y[c] = 255;
+ else
+ running_avg_y[c] = sig[c] + adjustment;
+
+ col_sum[c] += adjustment;
+ } else {
+ if ((sig[c] - adjustment) < 0)
+ running_avg_y[c] = 0;
+ else
+ running_avg_y[c] = sig[c] - adjustment;
+
+ col_sum[c] -= adjustment;
+ }
+ }
+ }
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ for (int c = 0; c < 16; ++c) {
+ if (col_sum[c] >= 128) {
+ col_sum[c] = 127;
+ }
+ sum_diff += col_sum[c];
+ }
+
+ sum_diff_thresh = kSumDiffThreshold;
+ if (increase_denoising)
+ sum_diff_thresh = kSumDiffThresholdHigh;
+ if (abs(sum_diff) > sum_diff_thresh) {
+ int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_avg_y_stride * 16;
+ running_avg_y -= avg_y_stride * 16;
+ for (int r = 0; r < 16; ++r) {
+ for (int c = 0; c < 16; ++c) {
+ int diff = mc_running_avg_y[c] - sig[c];
+ int adjustment = abs(diff);
+ if (adjustment > delta)
+ adjustment = delta;
+ if (diff > 0) {
+ // Bring denoised signal down.
+ if (running_avg_y[c] - adjustment < 0)
+ running_avg_y[c] = 0;
+ else
+ running_avg_y[c] = running_avg_y[c] - adjustment;
+ col_sum[c] -= adjustment;
+ } else if (diff < 0) {
+ // Bring denoised signal up.
+ if (running_avg_y[c] + adjustment > 255)
+ running_avg_y[c] = 255;
+ else
+ running_avg_y[c] = running_avg_y[c] + adjustment;
+ col_sum[c] += adjustment;
+ }
+ }
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ sum_diff = 0;
+ for (int c = 0; c < 16; ++c) {
+ if (col_sum[c] >= 128) {
+ col_sum[c] = 127;
+ }
+ sum_diff += col_sum[c];
+ }
+
+ if (abs(sum_diff) > sum_diff_thresh)
+ return COPY_BLOCK;
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
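
A quick sanity check of the Variance16x8() implementation above: it returns
sse - sum^2/128, i.e. 128 times the variance of the 128 subsampled
differences, so two flat blocks at a constant offset from each other must
report zero variance. The standalone snippet is illustrative only:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    #include "webrtc/modules/video_processing/util/denoiser_filter_c.h"

    int main() {
      uint8_t a[16 * 16];
      uint8_t b[16 * 16];
      std::memset(a, 12, sizeof(a));
      std::memset(b, 10, sizeof(b));  // Constant difference of 2 everywhere.
      webrtc::DenoiserFilterC filter;
      uint32_t sse = 0;
      // sum = 128 * 2 = 256 and sse = 128 * 4 = 512, so the result is
      // 512 - (256 * 256) / 128 = 0.
      assert(filter.Variance16x8(a, 16, b, 16, &sse) == 0);
      assert(sse == 512);
      return 0;
    }
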
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_c.h b/webrtc/modules/video_processing/util/denoiser_filter_c.h
new file mode 100644
index 0000000000..fe46ac38ec
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_c.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterC : public DenoiserFilter {
+ public:
+ DenoiserFilterC() {}
+ void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+                        uint32_t* sse) override;
+ DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_neon.cc b/webrtc/modules/video_processing/util/denoiser_filter_neon.cc
new file mode 100644
index 0000000000..b522bf002b
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_neon.cc
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "webrtc/modules/video_processing/util/denoiser_filter_neon.h"
+
+namespace webrtc {
+
+static int HorizontalAddS16x8(const int16x8_t v_16x8) {
+ const int32x4_t a = vpaddlq_s16(v_16x8);
+ const int64x2_t b = vpaddlq_s32(a);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
+static int HorizontalAddS32x4(const int32x4_t v_32x4) {
+ const int64x2_t b = vpaddlq_s32(v_32x4);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
+static void VarianceNeonW8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ int w,
+ int h,
+ uint32_t* sse,
+ int64_t* sum) {
+ int16x8_t v_sum = vdupq_n_s16(0);
+ int32x4_t v_sse_lo = vdupq_n_s32(0);
+ int32x4_t v_sse_hi = vdupq_n_s32(0);
+
+ for (int i = 0; i < h; ++i) {
+ for (int j = 0; j < w; j += 8) {
+ const uint8x8_t v_a = vld1_u8(&a[j]);
+ const uint8x8_t v_b = vld1_u8(&b[j]);
+ const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
+ const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
+ v_sum = vaddq_s16(v_sum, sv_diff);
+ v_sse_lo =
+ vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
+ v_sse_hi =
+ vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
+ }
+ a += a_stride;
+ b += b_stride;
+ }
+
+ *sum = HorizontalAddS16x8(v_sum);
+ *sse =
+ static_cast<uint32_t>(HorizontalAddS32x4(vaddq_s32(v_sse_lo, v_sse_hi)));
+}
+
+void DenoiserFilterNEON::CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ uint8x16_t qtmp;
+ for (int r = 0; r < 16; r++) {
+ qtmp = vld1q_u8(src);
+ vst1q_u8(dst, qtmp);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void DenoiserFilterNEON::CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ uint8x8_t vtmp;
+
+ for (int r = 0; r < 8; r++) {
+ vtmp = vld1_u8(src);
+ vst1_u8(dst, vtmp);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+uint32_t DenoiserFilterNEON::Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ uint32_t* sse) {
+ int64_t sum = 0;
+ VarianceNeonW8(a, a_stride << 1, b, b_stride << 1, 16, 8, sse, &sum);
+ return *sse - ((sum * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterNEON::MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_running_avg_y_stride,
+ uint8_t* running_avg_y,
+ int running_avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+  // If motion_magnitude is small, make the denoiser more aggressive by
+  // increasing the adjustment for each level: the level 1 adjustment is
+  // increased while the deltas between levels stay the same.
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
+ : 0;
+ const uint8x16_t v_level1_adjustment = vmovq_n_u8(
+ (motion_magnitude <= kMotionMagnitudeThreshold) ? 4 + shift_inc : 3);
+ const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
+ const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
+ const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
+ const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
+ const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+ int64x2_t v_sum_diff_total = vdupq_n_s64(0);
+
+ // Go over lines.
+ for (int r = 0; r < 16; ++r) {
+ // Load inputs.
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ // Calculate absolute difference and sign masks.
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
+
+    // Figure out which level we are in.
+ const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
+ const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
+ const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
+
+ // Calculate absolute adjustments for level 1, 2 and 3.
+ const uint8x16_t v_level2_adjustment =
+ vandq_u8(v_level2_mask, v_delta_level_1_and_2);
+ const uint8x16_t v_level3_adjustment =
+ vandq_u8(v_level3_mask, v_delta_level_2_and_3);
+ const uint8x16_t v_level1and2_adjustment =
+ vaddq_u8(v_level1_adjustment, v_level2_adjustment);
+ const uint8x16_t v_level1and2and3_adjustment =
+ vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
+
+    // Select the absolute adjustment: the absolute difference itself when
+    // in level 0, otherwise the combined value for levels 1, 2 and 3.
+ const uint8x16_t v_abs_adjustment =
+ vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
+
+ // Calculate positive and negative adjustments. Apply them to the signal
+ // and accumulate them. Adjustments are less than eight and the maximum
+ // sum of them (7 * 16) can fit in a signed char.
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
+
+ // Store results.
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ // Sum all the accumulators to have the sum of all pixel differences
+ // for this macroblock.
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
+ vreinterpretq_s8_u8(v_neg_adjustment));
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_running_avg_y_stride;
+ running_avg_y += running_avg_y_stride;
+ }
+
+  // Too many adjustments => copy the block.
+ {
+ int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+ int sum_diff_thresh = kSumDiffThreshold;
+
+ if (increase_denoising)
+ sum_diff_thresh = kSumDiffThresholdHigh;
+ if (sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+      // check if we can still apply some (weaker) temporal filtering to
+ // this block, that would otherwise not be denoised at all. Simplest
+ // is to apply an additional adjustment to running_avg_y to bring it
+ // closer to sig. The adjustment is capped by a maximum delta, and
+ // chosen such that in most cases the resulting sum_diff will be
+      // within the acceptable range given by sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const uint8x16_t k_delta = vmovq_n_u8(delta);
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_running_avg_y_stride * 16;
+ running_avg_y -= running_avg_y_stride * 16;
+ for (int r = 0; r < 16; ++r) {
+ uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ // Calculate absolute difference and sign masks.
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask =
+ vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask =
+ vcgtq_u8(v_sig, v_mc_running_avg_y);
+ // Clamp absolute difference to delta to get the adjustment.
+ const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
+
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
+ v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);
+
+ // Store results.
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
+ vreinterpretq_s8_u8(v_pos_adjustment));
+
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 =
+ vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_running_avg_y_stride;
+ running_avg_y += running_avg_y_stride;
+ }
+ {
+ // Update the sum of all pixel differences of this MB.
+ x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+
+ if (sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ // Tell above level that block was filtered.
+ running_avg_y -= running_avg_y_stride * 16;
+ sig -= sig_stride * 16;
+
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
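
The mask arithmetic above is easier to follow against a scalar mirror. The
function below is hypothetical and only illustrates the per-pixel adjustment
the NEON masks compute when motion_magnitude is small and increase_denoising
is off:

    #include <cstdint>
    #include <cstdlib>

    uint8_t AdjustPixelScalar(uint8_t sig, uint8_t mc_running_avg) {
      const int diff = mc_running_avg - sig;
      const int abs_diff = std::abs(diff);
      int adjustment;
      if (abs_diff < 4)        // Level 0: adopt the full difference.
        adjustment = abs_diff;
      else if (abs_diff < 8)   // Level 1.
        adjustment = 4;
      else if (abs_diff < 16)  // Level 2: 4 + 1.
        adjustment = 5;
      else                     // Level 3: 4 + 1 + 2.
        adjustment = 7;
      // Pull the signal toward the motion-compensated running average,
      // saturating to [0, 255] as vqaddq_u8/vqsubq_u8 do.
      const int out = diff > 0 ? sig + adjustment : sig - adjustment;
      return out < 0 ? 0 : (out > 255 ? 255 : static_cast<uint8_t>(out));
    }
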
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_neon.h b/webrtc/modules/video_processing/util/denoiser_filter_neon.h
new file mode 100644
index 0000000000..bc87ba788e
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_neon.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterNEON : public DenoiserFilter {
+ public:
+ DenoiserFilterNEON() {}
+ void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+                        uint32_t* sse) override;
+ DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_sse2.cc b/webrtc/modules/video_processing/util/denoiser_filter_sse2.cc
new file mode 100644
index 0000000000..903d7b1ec6
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_sse2.cc
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "webrtc/modules/video_processing/util/denoiser_filter_sse2.h"
+
+namespace webrtc {
+
+static void Get8x8varSse2(const uint8_t* src,
+ int src_stride,
+ const uint8_t* ref,
+ int ref_stride,
+                          uint32_t* sse,
+ int* sum) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i vsum = _mm_setzero_si128();
+ __m128i vsse = _mm_setzero_si128();
+
+ for (int i = 0; i < 8; i += 2) {
+ const __m128i src0 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(src + i * src_stride)), zero);
+ const __m128i ref0 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(ref + i * ref_stride)), zero);
+ const __m128i diff0 = _mm_sub_epi16(src0, ref0);
+
+ const __m128i src1 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(src + (i + 1) * src_stride)), zero);
+ const __m128i ref1 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(ref + (i + 1) * ref_stride)), zero);
+ const __m128i diff1 = _mm_sub_epi16(src1, ref1);
+
+ vsum = _mm_add_epi16(vsum, diff0);
+ vsum = _mm_add_epi16(vsum, diff1);
+ vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff0, diff0));
+ vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff1, diff1));
+ }
+
+ // sum
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
+ *sum = static_cast<int16_t>(_mm_extract_epi16(vsum, 0));
+
+ // sse
+ vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
+ vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 4));
+ *sse = _mm_cvtsi128_si32(vsse);
+}
+
+static void VarianceSSE2(const unsigned char* src,
+ int src_stride,
+ const unsigned char* ref,
+ int ref_stride,
+ int w,
+ int h,
+ uint32_t* sse,
+ int64_t* sum,
+ int block_size) {
+ *sse = 0;
+ *sum = 0;
+
+ for (int i = 0; i < h; i += block_size) {
+ for (int j = 0; j < w; j += block_size) {
+ uint32_t sse0 = 0;
+ int32_t sum0 = 0;
+
+ Get8x8varSse2(src + src_stride * i + j, src_stride,
+ ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
+ *sse += sse0;
+ *sum += sum0;
+ }
+ }
+}
+
+// Compute the sum of all pixel differences of this MB.
+static uint32_t AbsSumDiff16x1(__m128i acc_diff) {
+ const __m128i k_1 = _mm_set1_epi16(1);
+ const __m128i acc_diff_lo =
+ _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_hi =
+ _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
+ const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
+ const __m128i hgfe_dcba =
+ _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
+ const __m128i hgfedcba =
+ _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
+ unsigned int sum_diff = abs(_mm_cvtsi128_si32(hgfedcba));
+
+ return sum_diff;
+}
+
+// TODO(jackychen): Optimize this function using SSE2.
+void DenoiserFilterSSE2::CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 16; i++) {
+ memcpy(dst, src, 16);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+// TODO(jackychen): Optimize this function using SSE2.
+void DenoiserFilterSSE2::CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 8; i++) {
+ memcpy(dst, src, 8);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+uint32_t DenoiserFilterSSE2::Variance16x8(const uint8_t* src,
+ int src_stride,
+ const uint8_t* ref,
+ int ref_stride,
+ uint32_t* sse) {
+ int64_t sum = 0;
+ VarianceSSE2(src, src_stride << 1, ref, ref_stride << 1, 16, 8, sse, &sum, 8);
+ return *sse - ((sum * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterSSE2::MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
+ : 0;
+ __m128i acc_diff = _mm_setzero_si128();
+ const __m128i k_0 = _mm_setzero_si128();
+ const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
+ const __m128i k_8 = _mm_set1_epi8(8);
+ const __m128i k_16 = _mm_set1_epi8(16);
+ // Modify each level's adjustment according to motion_magnitude.
+ const __m128i l3 = _mm_set1_epi8(
+ (motion_magnitude <= kMotionMagnitudeThreshold) ? 7 + shift_inc : 6);
+ // Difference between level 3 and level 2 is 2.
+ const __m128i l32 = _mm_set1_epi8(2);
+ // Difference between level 2 and level 1 is 1.
+ const __m128i l21 = _mm_set1_epi8(1);
+
+ for (int r = 0; r < 16; ++r) {
+ // Calculate differences.
+ const __m128i v_sig =
+ _mm_loadu_si128(reinterpret_cast<const __m128i*>(&sig[0]));
+ const __m128i v_mc_running_avg_y =
+ _mm_loadu_si128(reinterpret_cast<__m128i*>(&mc_running_avg_y[0]));
+ __m128i v_running_avg_y;
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+ // Obtain the sign. FF if diff is negative.
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ // Clamp absolute difference to 16 to be used to get mask. Doing this
+ // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
+ const __m128i clamped_absdiff =
+ _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16);
+ // Get masks for l2 l1 and l0 adjustments.
+ const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
+ const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
+ const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
+ // Get adjustments for l2, l1, and l0.
+ __m128i adj2 = _mm_and_si128(mask2, l32);
+ const __m128i adj1 = _mm_and_si128(mask1, l21);
+ const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
+ __m128i adj, padj, nadj;
+
+ // Combine the adjustments and get absolute adjustments.
+ adj2 = _mm_add_epi8(adj2, adj1);
+ adj = _mm_sub_epi8(l3, adj2);
+ adj = _mm_andnot_si128(mask0, adj);
+ adj = _mm_or_si128(adj, adj0);
+
+ // Restore the sign and get positive and negative adjustments.
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+
+ // Calculate filtered value.
+ v_running_avg_y = _mm_adds_epu8(v_sig, padj);
+ v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(running_avg_y),
+ v_running_avg_y);
+
+ // Adjustments <=7, and each element in acc_diff can fit in signed
+ // char.
+ acc_diff = _mm_adds_epi8(acc_diff, padj);
+ acc_diff = _mm_subs_epi8(acc_diff, nadj);
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ {
+ // Compute the sum of all pixel differences of this MB.
+ unsigned int abs_sum_diff = AbsSumDiff16x1(acc_diff);
+ unsigned int sum_diff_thresh = kSumDiffThreshold;
+ if (increase_denoising)
+ sum_diff_thresh = kSumDiffThresholdHigh;
+ if (abs_sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+ // check if we can still apply some (weaker) temporal filtering to
+ // this block, that would otherwise not be denoised at all. Simplest
+ // is to apply an additional adjustment to running_avg_y to bring it
+ // closer to sig. The adjustment is capped by a maximum delta, and
+ // chosen such that in most cases the resulting sum_diff will be
+ // within the acceptable range given by sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const __m128i k_delta = _mm_set1_epi8(delta);
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_avg_y_stride * 16;
+ running_avg_y -= avg_y_stride * 16;
+ for (int r = 0; r < 16; ++r) {
+ __m128i v_running_avg_y =
+ _mm_loadu_si128(reinterpret_cast<__m128i*>(&running_avg_y[0]));
+ // Calculate differences.
+ const __m128i v_sig =
+ _mm_loadu_si128(reinterpret_cast<const __m128i*>(&sig[0]));
+ const __m128i v_mc_running_avg_y =
+ _mm_loadu_si128(reinterpret_cast<__m128i*>(&mc_running_avg_y[0]));
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+ // Obtain the sign. FF if diff is negative.
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ // Clamp absolute difference to delta to get the adjustment.
+ const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
+ // Restore the sign and get positive and negative adjustments.
+ __m128i padj, nadj;
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+ // Calculate filtered value.
+ v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj);
+ v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(running_avg_y),
+ v_running_avg_y);
+
+ // Accumulate the adjustments.
+ acc_diff = _mm_subs_epi8(acc_diff, padj);
+ acc_diff = _mm_adds_epi8(acc_diff, nadj);
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+ abs_sum_diff = AbsSumDiff16x1(acc_diff);
+ if (abs_sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
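
The SSE2 path leans on a saturating-subtract trick: _mm_subs_epu8 applied in
both orders yields max(a-b, 0) and max(b-a, 0), whose bitwise OR is |a-b|,
and pdiff == 0 exactly when the signed difference is non-positive, which is
what the _mm_cmpeq_epi8(pdiff, k_0) sign mask exploits. An exhaustive scalar
check (illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    int main() {
      for (int a = 0; a < 256; ++a) {
        for (int b = 0; b < 256; ++b) {
          const uint8_t pdiff = a > b ? a - b : 0;  // _mm_subs_epu8(a, b)
          const uint8_t ndiff = b > a ? b - a : 0;  // _mm_subs_epu8(b, a)
          assert((pdiff | ndiff) == std::abs(a - b));
          assert((pdiff == 0) == (a - b <= 0));
        }
      }
      return 0;
    }
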
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_sse2.h b/webrtc/modules/video_processing/util/denoiser_filter_sse2.h
new file mode 100644
index 0000000000..31d8510902
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_sse2.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterSSE2 : public DenoiserFilter {
+ public:
+ DenoiserFilterSSE2() {}
+ void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+                        uint32_t* sse) override;
+ DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
diff --git a/webrtc/modules/video_processing/util/skin_detection.cc b/webrtc/modules/video_processing/util/skin_detection.cc
new file mode 100644
index 0000000000..bf631ce2f6
--- /dev/null
+++ b/webrtc/modules/video_processing/util/skin_detection.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+
+#include "webrtc/modules/video_processing/util/skin_detection.h"
+
+namespace webrtc {
+
+// Fixed-point skin color model parameters.
+static const int skin_mean[2] = {7463, 9614}; // q6
+static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16
+static const int skin_threshold = 1570636; // q18
+
+// Thresholds on luminance.
+static const int y_low = 20;
+static const int y_high = 220;
+
+// Evaluates the Mahalanobis distance measure for the input CbCr values.
+static int EvaluateSkinColorDifference(int cb, int cr) {
+ const int cb_q6 = cb << 6;
+ const int cr_q6 = cr << 6;
+ const int cb_diff_q12 = (cb_q6 - skin_mean[0]) * (cb_q6 - skin_mean[0]);
+ const int cbcr_diff_q12 = (cb_q6 - skin_mean[0]) * (cr_q6 - skin_mean[1]);
+ const int cr_diff_q12 = (cr_q6 - skin_mean[1]) * (cr_q6 - skin_mean[1]);
+ const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
+ const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
+ const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
+ const int skin_diff =
+ skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
+ skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
+ return skin_diff;
+}
+
+bool MbHasSkinColor(const uint8_t* y_src,
+ const uint8_t* u_src,
+ const uint8_t* v_src,
+ const int stride_y,
+ const int stride_u,
+ const int stride_v,
+ const int mb_row,
+ const int mb_col) {
+ const uint8_t* y = y_src + ((mb_row << 4) + 8) * stride_y + (mb_col << 4) + 8;
+ const uint8_t* u = u_src + ((mb_row << 3) + 4) * stride_u + (mb_col << 3) + 4;
+ const uint8_t* v = v_src + ((mb_row << 3) + 4) * stride_v + (mb_col << 3) + 4;
+ // Use 2x2 average of center pixel to compute skin area.
+ uint8_t y_avg = (*y + *(y + 1) + *(y + stride_y) + *(y + stride_y + 1)) >> 2;
+ uint8_t u_avg = (*u + *(u + 1) + *(u + stride_u) + *(u + stride_u + 1)) >> 2;
+ uint8_t v_avg = (*v + *(v + 1) + *(v + stride_v) + *(v + stride_v + 1)) >> 2;
+ // Ignore MB with too high or low brightness.
+ if (y_avg < y_low || y_avg > y_high)
+ return false;
+ else
+ return (EvaluateSkinColorDifference(u_avg, v_avg) < skin_threshold);
+}
+
+} // namespace webrtc
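
The Q-format bookkeeping above can be cross-checked in floating point: the
means are Q6, the inverse covariance is Q16, and the products are rounded to
Q2, so the accumulated distance and skin_threshold are both Q18 and dividing
by 1 << 18 recovers the real-valued Mahalanobis distance. A hypothetical
standalone check with an arbitrarily chosen CbCr sample:

    #include <cstdio>

    int main() {
      const double mean_cb = 7463.0 / 64.0;  // Q6 -> real, ~116.6
      const double mean_cr = 9614.0 / 64.0;  // Q6 -> real, ~150.2
      const double icov[4] = {4107.0 / 65536.0, 1663.0 / 65536.0,
                              1663.0 / 65536.0, 2157.0 / 65536.0};  // Q16
      const double threshold = 1570636.0 / 262144.0;  // Q18 -> real, ~5.99
      const double dcb = 120.0 - mean_cb;  // Sample (Cb, Cr) = (120, 150).
      const double dcr = 150.0 - mean_cr;
      const double dist = icov[0] * dcb * dcb +
                          (icov[1] + icov[2]) * dcb * dcr +
                          icov[3] * dcr * dcr;
      std::printf("distance %.3f vs threshold %.3f -> %s\n", dist, threshold,
                  dist < threshold ? "skin" : "not skin");
      return 0;
    }
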
diff --git a/webrtc/modules/video_processing/util/skin_detection.h b/webrtc/modules/video_processing/util/skin_detection.h
new file mode 100755
index 0000000000..561c03c425
--- /dev/null
+++ b/webrtc/modules/video_processing/util/skin_detection.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+bool MbHasSkinColor(const uint8_t* y_src,
+ const uint8_t* u_src,
+ const uint8_t* v_src,
+ const int stride_y,
+ const int stride_u,
+ const int stride_v,
+ const int mb_row,
+ const int mb_col);
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
diff --git a/webrtc/modules/video_processing/main/source/video_decimator.cc b/webrtc/modules/video_processing/video_decimator.cc
index 34c29c1677..63e347b026 100644
--- a/webrtc/modules/video_processing/main/source/video_decimator.cc
+++ b/webrtc/modules/video_processing/video_decimator.cc
@@ -9,8 +9,8 @@
*/
#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/video_decimator.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/video_decimator.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#define VD_MIN(a, b) ((a) < (b)) ? (a) : (b)
@@ -23,7 +23,7 @@ VPMVideoDecimator::VPMVideoDecimator() {
VPMVideoDecimator::~VPMVideoDecimator() {}
-void VPMVideoDecimator::Reset() {
+void VPMVideoDecimator::Reset() {
overshoot_modifier_ = 0;
drop_count_ = 0;
keep_count_ = 0;
@@ -43,14 +43,17 @@ void VPMVideoDecimator::SetTargetFramerate(int frame_rate) {
}
bool VPMVideoDecimator::DropFrame() {
- if (!enable_temporal_decimation_) return false;
+ if (!enable_temporal_decimation_)
+ return false;
- if (incoming_frame_rate_ <= 0) return false;
+ if (incoming_frame_rate_ <= 0)
+ return false;
const uint32_t incomingframe_rate =
static_cast<uint32_t>(incoming_frame_rate_ + 0.5f);
- if (target_frame_rate_ == 0) return true;
+ if (target_frame_rate_ == 0)
+ return true;
bool drop = false;
if (incomingframe_rate > target_frame_rate_) {
@@ -61,44 +64,43 @@ bool VPMVideoDecimator::DropFrame() {
overshoot_modifier_ = 0;
}
- if (overshoot && 2 * overshoot < (int32_t) incomingframe_rate) {
+ if (overshoot && 2 * overshoot < (int32_t)incomingframe_rate) {
if (drop_count_) { // Just got here so drop to be sure.
- drop_count_ = 0;
- return true;
+ drop_count_ = 0;
+ return true;
}
const uint32_t dropVar = incomingframe_rate / overshoot;
if (keep_count_ >= dropVar) {
- drop = true;
- overshoot_modifier_ = -((int32_t) incomingframe_rate % overshoot) / 3;
- keep_count_ = 1;
+ drop = true;
+ overshoot_modifier_ = -((int32_t)incomingframe_rate % overshoot) / 3;
+ keep_count_ = 1;
} else {
- keep_count_++;
+ keep_count_++;
}
} else {
keep_count_ = 0;
const uint32_t dropVar = overshoot / target_frame_rate_;
if (drop_count_ < dropVar) {
- drop = true;
- drop_count_++;
+ drop = true;
+ drop_count_++;
} else {
- overshoot_modifier_ = overshoot % target_frame_rate_;
- drop = false;
- drop_count_ = 0;
+ overshoot_modifier_ = overshoot % target_frame_rate_;
+ drop = false;
+ drop_count_ = 0;
}
}
}
return drop;
}
-
-uint32_t VPMVideoDecimator::Decimatedframe_rate() {
-ProcessIncomingframe_rate(TickTime::MillisecondTimestamp());
+uint32_t VPMVideoDecimator::GetDecimatedFrameRate() {
+ ProcessIncomingframe_rate(TickTime::MillisecondTimestamp());
if (!enable_temporal_decimation_) {
return static_cast<uint32_t>(incoming_frame_rate_ + 0.5f);
}
return VD_MIN(target_frame_rate_,
- static_cast<uint32_t>(incoming_frame_rate_ + 0.5f));
+ static_cast<uint32_t>(incoming_frame_rate_ + 0.5f));
}
uint32_t VPMVideoDecimator::Inputframe_rate() {
@@ -113,7 +115,7 @@ void VPMVideoDecimator::UpdateIncomingframe_rate() {
} else {
// Shift.
for (int i = kFrameCountHistory_size - 2; i >= 0; i--) {
- incoming_frame_times_[i+1] = incoming_frame_times_[i];
+ incoming_frame_times_[i + 1] = incoming_frame_times_[i];
}
}
incoming_frame_times_[0] = now;
@@ -133,7 +135,7 @@ void VPMVideoDecimator::ProcessIncomingframe_rate(int64_t now) {
}
}
if (num > 1) {
- int64_t diff = now - incoming_frame_times_[num-1];
+ int64_t diff = now - incoming_frame_times_[num - 1];
incoming_frame_rate_ = 1.0;
if (diff > 0) {
incoming_frame_rate_ = nrOfFrames * 1000.0f / static_cast<float>(diff);
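
[To make the drop logic in DropFrame() concrete, here is a worked trace, assuming, per the code just above this hunk, that overshoot is the incoming rate's excess over the target carried between calls via overshoot_modifier_:]

    // Worked example: incoming 30 fps, target 15 fps.
    //   overshoot = 30 - 15 = 15; 2 * overshoot (30) is NOT < 30,
    //   so the else branch runs with dropVar = 15 / 15 = 1:
    //     call 1: drop_count_ (0) < 1  -> drop,  drop_count_ = 1
    //     call 2: drop_count_ (1) !< 1 -> keep,  drop_count_ = 0
    //   Every other frame is dropped, yielding the 15 fps target.
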
diff --git a/webrtc/modules/video_processing/main/source/video_decimator.h b/webrtc/modules/video_processing/video_decimator.h
index 3d4573caf8..1b871df8c3 100644
--- a/webrtc/modules/video_processing/main/source/video_decimator.h
+++ b/webrtc/modules/video_processing/video_decimator.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_VIDEO_DECIMATOR_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_VIDEO_DECIMATOR_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DECIMATOR_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DECIMATOR_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -32,7 +32,7 @@ class VPMVideoDecimator {
void UpdateIncomingframe_rate();
// Get Decimated Frame Rate/Dimensions.
- uint32_t Decimatedframe_rate();
+ uint32_t GetDecimatedFrameRate();
// Get input frame rate.
uint32_t Inputframe_rate();
@@ -40,8 +40,8 @@ class VPMVideoDecimator {
private:
void ProcessIncomingframe_rate(int64_t now);
- enum { kFrameCountHistory_size = 90};
- enum { kFrameHistoryWindowMs = 2000};
+ enum { kFrameCountHistory_size = 90 };
+ enum { kFrameHistoryWindowMs = 2000 };
// Temporal decimation.
int32_t overshoot_modifier_;
@@ -55,4 +55,4 @@ class VPMVideoDecimator {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_VIDEO_DECIMATOR_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DECIMATOR_H_
diff --git a/webrtc/modules/video_processing/video_denoiser.cc b/webrtc/modules/video_processing/video_denoiser.cc
new file mode 100644
index 0000000000..4902a89491
--- /dev/null
+++ b/webrtc/modules/video_processing/video_denoiser.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "webrtc/common_video/libyuv/include/scaler.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/video_denoiser.h"
+
+namespace webrtc {
+
+VideoDenoiser::VideoDenoiser(bool runtime_cpu_detection)
+ : width_(0),
+ height_(0),
+ filter_(DenoiserFilter::Create(runtime_cpu_detection)) {}
+
+void VideoDenoiser::TrailingReduction(int mb_rows,
+ int mb_cols,
+ const uint8_t* y_src,
+ int stride_y,
+ uint8_t* y_dst) {
+ for (int mb_row = 1; mb_row < mb_rows - 1; ++mb_row) {
+ for (int mb_col = 1; mb_col < mb_cols - 1; ++mb_col) {
+ int mb_index = mb_row * mb_cols + mb_col;
+ uint8_t* mb_dst = y_dst + (mb_row << 4) * stride_y + (mb_col << 4);
+ const uint8_t* mb_src = y_src + (mb_row << 4) * stride_y + (mb_col << 4);
+ // If the number of denoised neighbors is below a threshold, do NOT
+ // denoise the block; different thresholds apply to skin and non-skin
+ // MBs. The change of denoising status does not propagate.
+ if (metrics_[mb_index].is_skin) {
+ // The threshold is higher (more strict) for the non-skin MBs below,
+ // where trailing artifacts usually appear.
+ if (metrics_[mb_index].denoise &&
+ metrics_[mb_index + 1].denoise + metrics_[mb_index - 1].denoise +
+ metrics_[mb_index + mb_cols].denoise +
+ metrics_[mb_index - mb_cols].denoise <=
+ 2) {
+ metrics_[mb_index].denoise = 0;
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ }
+ } else if (metrics_[mb_index].denoise &&
+ metrics_[mb_index + 1].denoise +
+ metrics_[mb_index - 1].denoise +
+ metrics_[mb_index + mb_cols + 1].denoise +
+ metrics_[mb_index + mb_cols - 1].denoise +
+ metrics_[mb_index - mb_cols + 1].denoise +
+ metrics_[mb_index - mb_cols - 1].denoise +
+ metrics_[mb_index + mb_cols].denoise +
+ metrics_[mb_index - mb_cols].denoise <=
+ 7) {
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ }
+ }
+ }
+}
+
+void VideoDenoiser::DenoiseFrame(const VideoFrame& frame,
+ VideoFrame* denoised_frame) {
+ int stride_y = frame.stride(kYPlane);
+ int stride_u = frame.stride(kUPlane);
+ int stride_v = frame.stride(kVPlane);
+ // If the previous width and height differ from the current frame's,
+ // skip denoising and copy the frame through unchanged.
+ if (width_ != frame.width() || height_ != frame.height()) {
+ width_ = frame.width();
+ height_ = frame.height();
+ denoised_frame->CreateFrame(frame.buffer(kYPlane), frame.buffer(kUPlane),
+ frame.buffer(kVPlane), width_, height_,
+ stride_y, stride_u, stride_v);
+ // Set the time parameters on the output frame.
+ denoised_frame->set_timestamp(frame.timestamp());
+ denoised_frame->set_render_time_ms(frame.render_time_ms());
+ return;
+ }
+ // For 16x16 block.
+ int mb_cols = width_ >> 4;
+ int mb_rows = height_ >> 4;
+ if (metrics_.get() == nullptr)
+ metrics_.reset(new DenoiseMetrics[mb_cols * mb_rows]());
+ // Denoise on Y plane.
+ uint8_t* y_dst = denoised_frame->buffer(kYPlane);
+ uint8_t* u_dst = denoised_frame->buffer(kUPlane);
+ uint8_t* v_dst = denoised_frame->buffer(kVPlane);
+ const uint8_t* y_src = frame.buffer(kYPlane);
+ const uint8_t* u_src = frame.buffer(kUPlane);
+ const uint8_t* v_src = frame.buffer(kVPlane);
+ // Temporary buffer to store denoising result.
+ uint8_t y_tmp[16 * 16] = {0};
+ for (int mb_row = 0; mb_row < mb_rows; ++mb_row) {
+ for (int mb_col = 0; mb_col < mb_cols; ++mb_col) {
+ const uint8_t* mb_src = y_src + (mb_row << 4) * stride_y + (mb_col << 4);
+ uint8_t* mb_dst = y_dst + (mb_row << 4) * stride_y + (mb_col << 4);
+ int mb_index = mb_row * mb_cols + mb_col;
+ // Denoise each MB at the very start and save the result to a temporary
+ // buffer.
+ if (filter_->MbDenoise(mb_dst, stride_y, y_tmp, 16, mb_src, stride_y, 0,
+ 1) == FILTER_BLOCK) {
+ uint32_t thr_var = 0;
+ // Save var and sad to the buffer.
+ metrics_[mb_index].var = filter_->Variance16x8(
+ mb_dst, stride_y, y_tmp, 16, &metrics_[mb_index].sad);
+ // Get skin map.
+ metrics_[mb_index].is_skin = MbHasSkinColor(
+ y_src, u_src, v_src, stride_y, stride_u, stride_v, mb_row, mb_col);
+ // The variance threshold differs for skin and non-skin MBs.
+ // Skin MBs use a smaller threshold to reduce blockiness.
+ thr_var = metrics_[mb_index].is_skin ? 128 : 12 * 128;
+ if (metrics_[mb_index].var > thr_var) {
+ metrics_[mb_index].denoise = 0;
+ // Use the source MB.
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ } else {
+ metrics_[mb_index].denoise = 1;
+ // Use the denoised MB.
+ filter_->CopyMem16x16(y_tmp, 16, mb_dst, stride_y);
+ }
+ } else {
+ metrics_[mb_index].denoise = 0;
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ }
+ // Copy source U/V plane.
+ const uint8_t* mb_src_u =
+ u_src + (mb_row << 3) * stride_u + (mb_col << 3);
+ const uint8_t* mb_src_v =
+ v_src + (mb_row << 3) * stride_v + (mb_col << 3);
+ uint8_t* mb_dst_u = u_dst + (mb_row << 3) * stride_u + (mb_col << 3);
+ uint8_t* mb_dst_v = v_dst + (mb_row << 3) * stride_v + (mb_col << 3);
+ filter_->CopyMem8x8(mb_src_u, stride_u, mb_dst_u, stride_u);
+ filter_->CopyMem8x8(mb_src_v, stride_v, mb_dst_v, stride_v);
+ }
+ }
+ // Second pass.
+ // Reduce trailing artifacts and blockiness by consulting the
+ // neighbors' denoising status.
+ TrailingReduction(mb_rows, mb_cols, y_src, stride_y, y_dst);
+
+ // Set the time parameters on the output frame.
+ denoised_frame->set_timestamp(frame.timestamp());
+ denoised_frame->set_render_time_ms(frame.render_time_ms());
+ return;
+}
+
+} // namespace webrtc
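
[A minimal usage sketch for the new class, assuming input_frame is a populated webrtc::VideoFrame. Note that the first frame at a new resolution is only copied through (the width_/height_ check above), so denoising starts from the second same-size frame:]

    #include "webrtc/modules/video_processing/video_denoiser.h"

    void DenoiseOne(const webrtc::VideoFrame& input_frame) {
      // true -> let DenoiserFilter::Create() probe for SSE2/NEON at runtime.
      webrtc::VideoDenoiser denoiser(true);
      webrtc::VideoFrame denoised_frame;
      denoiser.DenoiseFrame(input_frame, &denoised_frame);
    }
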
diff --git a/webrtc/modules/video_processing/video_denoiser.h b/webrtc/modules/video_processing/video_denoiser.h
new file mode 100644
index 0000000000..107a15ca07
--- /dev/null
+++ b/webrtc/modules/video_processing/video_denoiser.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+#include "webrtc/modules/video_processing/util/skin_detection.h"
+
+namespace webrtc {
+
+class VideoDenoiser {
+ public:
+ explicit VideoDenoiser(bool runtime_cpu_detection);
+ void DenoiseFrame(const VideoFrame& frame, VideoFrame* denoised_frame);
+
+ private:
+ void TrailingReduction(int mb_rows,
+ int mb_cols,
+ const uint8_t* y_src,
+ int stride_y,
+ uint8_t* y_dst);
+ int width_;
+ int height_;
+ rtc::scoped_ptr<DenoiseMetrics[]> metrics_;
+ rtc::scoped_ptr<DenoiserFilter> filter_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
diff --git a/webrtc/modules/video_processing/video_processing.gypi b/webrtc/modules/video_processing/video_processing.gypi
index 5827a5b1a6..7418c455a2 100644
--- a/webrtc/modules/video_processing/video_processing.gypi
+++ b/webrtc/modules/video_processing/video_processing.gypi
@@ -18,29 +18,38 @@
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'sources': [
- 'main/interface/video_processing.h',
- 'main/interface/video_processing_defines.h',
- 'main/source/brighten.cc',
- 'main/source/brighten.h',
- 'main/source/brightness_detection.cc',
- 'main/source/brightness_detection.h',
- 'main/source/content_analysis.cc',
- 'main/source/content_analysis.h',
- 'main/source/deflickering.cc',
- 'main/source/deflickering.h',
- 'main/source/frame_preprocessor.cc',
- 'main/source/frame_preprocessor.h',
- 'main/source/spatial_resampler.cc',
- 'main/source/spatial_resampler.h',
- 'main/source/video_decimator.cc',
- 'main/source/video_decimator.h',
- 'main/source/video_processing_impl.cc',
- 'main/source/video_processing_impl.h',
+ 'include/video_processing.h',
+ 'include/video_processing_defines.h',
+ 'brightness_detection.cc',
+ 'brightness_detection.h',
+ 'content_analysis.cc',
+ 'content_analysis.h',
+ 'deflickering.cc',
+ 'deflickering.h',
+ 'frame_preprocessor.cc',
+ 'frame_preprocessor.h',
+ 'spatial_resampler.cc',
+ 'spatial_resampler.h',
+ 'video_decimator.cc',
+ 'video_decimator.h',
+ 'video_processing_impl.cc',
+ 'video_processing_impl.h',
+ 'video_denoiser.cc',
+ 'video_denoiser.h',
+ 'util/denoiser_filter.cc',
+ 'util/denoiser_filter.h',
+ 'util/denoiser_filter_c.cc',
+ 'util/denoiser_filter_c.h',
+ 'util/skin_detection.cc',
+ 'util/skin_detection.h',
],
'conditions': [
['target_arch=="ia32" or target_arch=="x64"', {
'dependencies': [ 'video_processing_sse2', ],
}],
+ ['target_arch=="arm" or target_arch=="arm64"', {
+ 'dependencies': [ 'video_processing_neon', ],
+ }],
],
},
],
@@ -51,7 +60,9 @@
'target_name': 'video_processing_sse2',
'type': 'static_library',
'sources': [
- 'main/source/content_analysis_sse2.cc',
+ 'content_analysis_sse2.cc',
+ 'util/denoiser_filter_sse2.cc',
+ 'util/denoiser_filter_sse2.h',
],
'conditions': [
['os_posix==1 and OS!="mac"', {
@@ -66,6 +77,19 @@
},
],
}],
+ ['target_arch=="arm" or target_arch=="arm64"', {
+ 'targets': [
+ {
+ 'target_name': 'video_processing_neon',
+ 'type': 'static_library',
+ 'includes': [ '../../build/arm_neon.gypi', ],
+ 'sources': [
+ 'util/denoiser_filter_neon.cc',
+ 'util/denoiser_filter_neon.h',
+ ],
+ },
+ ],
+ }],
],
}
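
[The per-architecture targets above exist so that DenoiserFilter::Create(runtime_cpu_detection), used in video_denoiser.cc, can hand back an optimized filter. The shipped util/denoiser_filter.cc is not part of this hunk; a plausible shape for the factory, with the macros, probes, and class names below assumed rather than confirmed, would be:]

    // Hypothetical dispatch sketch; macros, probes and class names assumed.
    DenoiserFilter* DenoiserFilter::Create(bool runtime_cpu_detection) {
      if (runtime_cpu_detection) {
    #if defined(WEBRTC_ARCH_X86_FAMILY)
        if (WebRtc_GetCPUInfo(kSSE2) != 0)  // runtime capability probe
          return new DenoiserFilterSSE2();
    #elif defined(WEBRTC_HAS_NEON)
        return new DenoiserFilterNEON();
    #endif
      }
      return new DenoiserFilterC();  // portable C fallback
    }
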
diff --git a/webrtc/modules/video_processing/video_processing_impl.cc b/webrtc/modules/video_processing/video_processing_impl.cc
new file mode 100644
index 0000000000..f34886f10f
--- /dev/null
+++ b/webrtc/modules/video_processing/video_processing_impl.cc
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_processing/video_processing_impl.h"
+
+#include <assert.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+
+namespace webrtc {
+
+namespace {
+
+int GetSubSamplingFactor(int width, int height) {
+ if (width * height >= 640 * 480) {
+ return 3;
+ } else if (width * height >= 352 * 288) {
+ return 2;
+ } else if (width * height >= 176 * 144) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+} // namespace
+
+VideoProcessing* VideoProcessing::Create() {
+ return new VideoProcessingImpl();
+}
+
+VideoProcessingImpl::VideoProcessingImpl() {}
+VideoProcessingImpl::~VideoProcessingImpl() {}
+
+void VideoProcessing::GetFrameStats(const VideoFrame& frame,
+ FrameStats* stats) {
+ ClearFrameStats(stats); // The histogram needs to be zeroed out.
+ if (frame.IsZeroSize()) {
+ return;
+ }
+
+ int width = frame.width();
+ int height = frame.height();
+ stats->sub_sampling_factor = GetSubSamplingFactor(width, height);
+
+ const uint8_t* buffer = frame.buffer(kYPlane);
+ // Compute the histogram and the sum of the frame.
+ for (int i = 0; i < height; i += (1 << stats->sub_sampling_factor)) {
+ int k = i * width;
+ for (int j = 0; j < width; j += (1 << stats->sub_sampling_factor)) {
+ stats->hist[buffer[k + j]]++;
+ stats->sum += buffer[k + j];
+ }
+ }
+
+ stats->num_pixels = (width * height) / ((1 << stats->sub_sampling_factor) *
+ (1 << stats->sub_sampling_factor));
+ assert(stats->num_pixels > 0);
+
+ // Compute the mean value of the frame.
+ stats->mean = stats->sum / stats->num_pixels;
+}
+
+bool VideoProcessing::ValidFrameStats(const FrameStats& stats) {
+ if (stats.num_pixels == 0) {
+ LOG(LS_WARNING) << "Invalid frame stats.";
+ return false;
+ }
+ return true;
+}
+
+void VideoProcessing::ClearFrameStats(FrameStats* stats) {
+ stats->mean = 0;
+ stats->sum = 0;
+ stats->num_pixels = 0;
+ stats->sub_sampling_factor = 0;
+ memset(stats->hist, 0, sizeof(stats->hist));
+}
+
+void VideoProcessing::Brighten(int delta, VideoFrame* frame) {
+ RTC_DCHECK(!frame->IsZeroSize());
+ RTC_DCHECK(frame->width() > 0);
+ RTC_DCHECK(frame->height() > 0);
+
+ int num_pixels = frame->width() * frame->height();
+
+ int look_up[256];
+ for (int i = 0; i < 256; i++) {
+ int val = i + delta;
+ look_up[i] = (val < 0) ? 0 : ((val > 255) ? 255 : val);
+ }
+
+ uint8_t* temp_ptr = frame->buffer(kYPlane);
+ for (int i = 0; i < num_pixels; i++) {
+ *temp_ptr = static_cast<uint8_t>(look_up[*temp_ptr]);
+ temp_ptr++;
+ }
+}
+
+int32_t VideoProcessingImpl::Deflickering(VideoFrame* frame,
+ FrameStats* stats) {
+ rtc::CritScope mutex(&mutex_);
+ return deflickering_.ProcessFrame(frame, stats);
+}
+
+int32_t VideoProcessingImpl::BrightnessDetection(const VideoFrame& frame,
+ const FrameStats& stats) {
+ rtc::CritScope mutex(&mutex_);
+ return brightness_detection_.ProcessFrame(frame, stats);
+}
+
+void VideoProcessingImpl::EnableTemporalDecimation(bool enable) {
+ rtc::CritScope mutex(&mutex_);
+ frame_pre_processor_.EnableTemporalDecimation(enable);
+}
+
+void VideoProcessingImpl::SetInputFrameResampleMode(
+ VideoFrameResampling resampling_mode) {
+ rtc::CritScope cs(&mutex_);
+ frame_pre_processor_.SetInputFrameResampleMode(resampling_mode);
+}
+
+int32_t VideoProcessingImpl::SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.SetTargetResolution(width, height, frame_rate);
+}
+
+void VideoProcessingImpl::SetTargetFramerate(int frame_rate) {
+ rtc::CritScope cs(&mutex_);
+ frame_pre_processor_.SetTargetFramerate(frame_rate);
+}
+
+uint32_t VideoProcessingImpl::GetDecimatedFrameRate() {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.GetDecimatedFrameRate();
+}
+
+uint32_t VideoProcessingImpl::GetDecimatedWidth() const {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.GetDecimatedWidth();
+}
+
+uint32_t VideoProcessingImpl::GetDecimatedHeight() const {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.GetDecimatedHeight();
+}
+
+void VideoProcessingImpl::EnableDenosing(bool enable) {
+ rtc::CritScope cs(&mutex_);
+ frame_pre_processor_.EnableDenosing(enable);
+}
+
+const VideoFrame* VideoProcessingImpl::PreprocessFrame(
+ const VideoFrame& frame) {
+ rtc::CritScope mutex(&mutex_);
+ return frame_pre_processor_.PreprocessFrame(frame);
+}
+
+VideoContentMetrics* VideoProcessingImpl::GetContentMetrics() const {
+ rtc::CritScope mutex(&mutex_);
+ return frame_pre_processor_.GetContentMetrics();
+}
+
+void VideoProcessingImpl::EnableContentAnalysis(bool enable) {
+ rtc::CritScope mutex(&mutex_);
+ frame_pre_processor_.EnableContentAnalysis(enable);
+}
+
+} // namespace webrtc
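
[The subsampling arithmetic in GetFrameStats() above, traced for a 640x480 frame:]

    //   sub_sampling_factor = 3  (since 640 * 480 >= 640 * 480)
    //   step = 1 << 3 = 8        (every 8th row and every 8th column)
    //   num_pixels = (640 * 480) / (8 * 8) = 4800 samples
    // hist, sum, and mean are therefore computed over 4800 pixels,
    // not all 307200.
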
diff --git a/webrtc/modules/video_processing/video_processing_impl.h b/webrtc/modules/video_processing/video_processing_impl.h
new file mode 100644
index 0000000000..edbaba12fa
--- /dev/null
+++ b/webrtc/modules/video_processing/video_processing_impl.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_PROCESSING_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_PROCESSING_IMPL_H_
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/brightness_detection.h"
+#include "webrtc/modules/video_processing/deflickering.h"
+#include "webrtc/modules/video_processing/frame_preprocessor.h"
+
+namespace webrtc {
+
+class VideoProcessingImpl : public VideoProcessing {
+ public:
+ VideoProcessingImpl();
+ ~VideoProcessingImpl() override;
+
+ // Implements VideoProcessing.
+ int32_t Deflickering(VideoFrame* frame, FrameStats* stats) override;
+ int32_t BrightnessDetection(const VideoFrame& frame,
+ const FrameStats& stats) override;
+ void EnableTemporalDecimation(bool enable) override;
+ void SetInputFrameResampleMode(VideoFrameResampling resampling_mode) override;
+ void EnableContentAnalysis(bool enable) override;
+ int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) override;
+ void SetTargetFramerate(int frame_rate) override;
+ uint32_t GetDecimatedFrameRate() override;
+ uint32_t GetDecimatedWidth() const override;
+ uint32_t GetDecimatedHeight() const override;
+ void EnableDenosing(bool enable) override;
+ const VideoFrame* PreprocessFrame(const VideoFrame& frame) override;
+ VideoContentMetrics* GetContentMetrics() const override;
+
+ private:
+ mutable rtc::CriticalSection mutex_;
+ VPMDeflickering deflickering_ GUARDED_BY(mutex_);
+ VPMBrightnessDetection brightness_detection_ GUARDED_BY(mutex_);
+ VPMFramePreprocessor frame_pre_processor_ GUARDED_BY(mutex_);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_PROCESSING_IMPL_H_
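
[A minimal end-to-end sketch of the refactored facade, assuming, consistent with the decimator wiring above, that PreprocessFrame() returns null when the frame is dropped:]

    #include "webrtc/base/scoped_ptr.h"
    #include "webrtc/modules/video_processing/include/video_processing.h"

    void RunPreprocessor(const webrtc::VideoFrame& captured) {
      rtc::scoped_ptr<webrtc::VideoProcessing> vp(
          webrtc::VideoProcessing::Create());
      vp->EnableTemporalDecimation(true);
      vp->SetTargetResolution(640, 360, 15);  // width, height, frame rate
      const webrtc::VideoFrame* out = vp->PreprocessFrame(captured);
      if (out != nullptr) {
        // Forward |out| to the encoder; a null return means the decimator
        // dropped this frame (assumption based on the wiring above).
      }
    }
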