aboutsummaryrefslogtreecommitdiff
path: root/webrtc/modules/video_processing/test
diff options
context:
space:
mode:
Diffstat (limited to 'webrtc/modules/video_processing/test')
-rw-r--r--webrtc/modules/video_processing/test/brightness_detection_test.cc120
-rw-r--r--webrtc/modules/video_processing/test/content_metrics_test.cc48
-rw-r--r--webrtc/modules/video_processing/test/createTable.m179
-rw-r--r--webrtc/modules/video_processing/test/deflickering_test.cc98
-rw-r--r--webrtc/modules/video_processing/test/denoiser_test.cc156
-rw-r--r--webrtc/modules/video_processing/test/readYUV420file.m45
-rw-r--r--webrtc/modules/video_processing/test/video_processing_unittest.cc415
-rw-r--r--webrtc/modules/video_processing/test/video_processing_unittest.h47
-rw-r--r--webrtc/modules/video_processing/test/writeYUV420file.m22
9 files changed, 1130 insertions, 0 deletions
diff --git a/webrtc/modules/video_processing/test/brightness_detection_test.cc b/webrtc/modules/video_processing/test/brightness_detection_test.cc
new file mode 100644
index 0000000000..669bb183e5
--- /dev/null
+++ b/webrtc/modules/video_processing/test/brightness_detection_test.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_BrightnessDetection DISABLED_BrightnessDetection
+#else
+#define MAYBE_BrightnessDetection BrightnessDetection
+#endif
+TEST_F(VideoProcessingTest, MAYBE_BrightnessDetection) {
+ uint32_t frameNum = 0;
+ int32_t brightnessWarning = 0;
+ uint32_t warningCount = 0;
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ if (brightnessWarning != VideoProcessing::kNoWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect few warnings
+ float warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("\nWarning proportions:\n");
+ printf("Stock foreman: %.1f %%\n", warningProportion);
+ EXPECT_LT(warningProportion, 10);
+
+ rewind(source_file_);
+ frameNum = 0;
+ warningCount = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_ &&
+ frameNum < 300) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+
+ uint8_t* frame = video_frame_.buffer(kYPlane);
+ uint32_t yTmp = 0;
+ for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
+ yTmp = frame[yIdx] << 1;
+ if (yTmp > 255) {
+ yTmp = 255;
+ }
+ frame[yIdx] = static_cast<uint8_t>(yTmp);
+ }
+
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ EXPECT_NE(VideoProcessing::kDarkWarning, brightnessWarning);
+ if (brightnessWarning == VideoProcessing::kBrightWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect many brightness warnings
+ warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("Bright foreman: %.1f %%\n", warningProportion);
+ EXPECT_GT(warningProportion, 95);
+
+ rewind(source_file_);
+ frameNum = 0;
+ warningCount = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_ &&
+ frameNum < 300) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+
+ uint8_t* y_plane = video_frame_.buffer(kYPlane);
+ int32_t yTmp = 0;
+ for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
+ yTmp = y_plane[yIdx] >> 1;
+ y_plane[yIdx] = static_cast<uint8_t>(yTmp);
+ }
+
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ EXPECT_NE(VideoProcessing::kBrightWarning, brightnessWarning);
+ if (brightnessWarning == VideoProcessing::kDarkWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect many darkness warnings
+ warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("Dark foreman: %.1f %%\n\n", warningProportion);
+ EXPECT_GT(warningProportion, 90);
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/test/content_metrics_test.cc b/webrtc/modules/video_processing/test/content_metrics_test.cc
new file mode 100644
index 0000000000..782f9cff59
--- /dev/null
+++ b/webrtc/modules/video_processing/test/content_metrics_test.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_ContentAnalysis) {
+#else
+TEST_F(VideoProcessingTest, ContentAnalysis) {
+#endif
+ VPMContentAnalysis ca__c(false);
+ VPMContentAnalysis ca__sse(true);
+ VideoContentMetrics* _cM_c;
+ VideoContentMetrics* _cM_SSE;
+
+ ca__c.Initialize(width_, height_);
+ ca__sse.Initialize(width_, height_);
+
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ // Using ConvertToI420 to add stride to the image.
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ _cM_c = ca__c.ComputeContentMetrics(video_frame_);
+ _cM_SSE = ca__sse.ComputeContentMetrics(video_frame_);
+
+ ASSERT_EQ(_cM_c->spatial_pred_err, _cM_SSE->spatial_pred_err);
+ ASSERT_EQ(_cM_c->spatial_pred_err_v, _cM_SSE->spatial_pred_err_v);
+ ASSERT_EQ(_cM_c->spatial_pred_err_h, _cM_SSE->spatial_pred_err_h);
+ ASSERT_EQ(_cM_c->motion_magnitude, _cM_SSE->motion_magnitude);
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/test/createTable.m b/webrtc/modules/video_processing/test/createTable.m
new file mode 100644
index 0000000000..fe8777ee71
--- /dev/null
+++ b/webrtc/modules/video_processing/test/createTable.m
@@ -0,0 +1,179 @@
+% Create the color enhancement look-up table and write it to
+% file colorEnhancementTable.cpp. Copy contents of that file into
+% the source file for the color enhancement function.
+
+clear
+close all
+
+
+% First, define the color enhancement in a normalized domain
+
+% Compander function is defined in three radial zones.
+% 1. From 0 to radius r0, the compander function
+% is a second-order polynomial intersecting the points (0,0)
+% and (r0, r0), and with a slope B in (0,0).
+% 2. From r0 to r1, the compander is a third-order polynomial
+% intersecting the points (r0, r0) and (r1, r1), and with the
+% same slope as the first part in the point (r0, r0) and slope
+% equal to 1 in (r1, r1).
+% 3. For radii larger than r1, the compander function is the
+% unity scale function (no scaling at all).
+
+r0=0.07; % Dead zone radius (must be > 0)
+r1=0.6; % Enhancement zone radius (must be > r0 and < 1)
+B=0.2; % initial slope of compander function (between 0 and 1)
+
+x0=linspace(0,r0).'; % zone 1
+x1=linspace(r0,r1).'; % zone 2
+x2=linspace(r1,1).'; % zone 3
+
+A=(1-B)/r0;
+f0=A*x0.^2+B*x0; % compander function in zone 1
+
+% equation system for finding second zone parameters
+M=[r0^3 r0^2 r0 1;
+ 3*r0^2 2*r0 1 0;
+ 3*r1^2 2*r1 1 0;
+ r1^3 r1^2 r1 1];
+m=[A*r0^2+B*r0; 2*A*r0+B; 1; r1];
+% solve equations
+theta=M\m;
+
+% compander function in zone 1
+f1=[x1.^3 x1.^2 x1 ones(size(x1))]*theta;
+
+x=[x0; x1; x2];
+f=[f0; f1; x2];
+
+% plot it
+figure(1)
+plot(x,f,x,x,':')
+xlabel('Normalized radius')
+ylabel('Modified radius')
+
+
+% Now, create the look-up table in the integer color space
+[U,V]=meshgrid(0:255, 0:255); % U-V space
+U0=U;
+V0=V;
+
+% Conversion matrix from normalized YUV to RGB
+T=[1 0 1.13983; 1 -0.39465 -0.58060; 1 2.03211 0];
+Ylum=0.5;
+
+figure(2)
+Z(:,:,1)=Ylum + (U-127)/256*T(1,2) + (V-127)/256*T(1,3);
+Z(:,:,2)=Ylum + (U-127)/256*T(2,2) + (V-127)/256*T(2,3);
+Z(:,:,3)=Ylum + (U-127)/256*T(3,2) + (V-127)/256*T(3,3);
+Z=max(Z,0);
+Z=min(Z,1);
+subplot(121)
+image(Z);
+axis square
+axis off
+set(gcf,'color','k')
+
+R = sqrt((U-127).^2 + (V-127).^2);
+Rnorm = R/127;
+RnormMod = Rnorm;
+RnormMod(RnormMod==0)=1; % avoid division with zero
+
+% find indices to pixels in dead-zone (zone 1)
+ix=find(Rnorm<=r0);
+scaleMatrix = (A*Rnorm(ix).^2 + B*Rnorm(ix))./RnormMod(ix);
+U(ix)=(U(ix)-127).*scaleMatrix+127;
+V(ix)=(V(ix)-127).*scaleMatrix+127;
+
+% find indices to pixels in zone 2
+ix=find(Rnorm>r0 & Rnorm<=r1);
+scaleMatrix = (theta(1)*Rnorm(ix).^3 + theta(2)*Rnorm(ix).^2 + ...
+ theta(3)*Rnorm(ix) + theta(4)) ./ RnormMod(ix);
+U(ix)=(U(ix)-127).*scaleMatrix + 127;
+V(ix)=(V(ix)-127).*scaleMatrix + 127;
+
+% round to integer values and saturate
+U=round(U);
+V=round(V);
+U=max(min(U,255),0);
+V=max(min(V,255),0);
+
+Z(:,:,1)=Ylum + (U-127)/256*T(1,2) + (V-127)/256*T(1,3);
+Z(:,:,2)=Ylum + (U-127)/256*T(2,2) + (V-127)/256*T(2,3);
+Z(:,:,3)=Ylum + (U-127)/256*T(3,2) + (V-127)/256*T(3,3);
+Z=max(Z,0);
+Z=min(Z,1);
+subplot(122)
+image(Z);
+axis square
+axis off
+
+figure(3)
+subplot(121)
+mesh(U-U0)
+subplot(122)
+mesh(V-V0)
+
+
+
+% Last, write to file
+% Write only one matrix, since U=V'
+
+fid = fopen('../out/Debug/colorEnhancementTable.h','wt');
+if fid==-1
+ error('Cannot open file colorEnhancementTable.cpp');
+end
+
+fprintf(fid,'//colorEnhancementTable.h\n\n');
+fprintf(fid,'//Copy the constant table to the appropriate header file.\n\n');
+
+fprintf(fid,'//Table created with Matlab script createTable.m\n\n');
+fprintf(fid,'//Usage:\n');
+fprintf(fid,'// Umod=colorTable[U][V]\n');
+fprintf(fid,'// Vmod=colorTable[V][U]\n');
+
+fprintf(fid,'static unsigned char colorTable[%i][%i] = {\n', size(U,1), size(U,2));
+
+for u=1:size(U,2)
+ fprintf(fid,' {%i', U(1,u));
+ for v=2:size(U,1)
+ fprintf(fid,', %i', U(v,u));
+ end
+ fprintf(fid,'}');
+ if u<size(U,2)
+ fprintf(fid,',');
+ end
+ fprintf(fid,'\n');
+end
+fprintf(fid,'};\n\n');
+fclose(fid);
+fprintf('done');
+
+
+answ=input('Create test vector (takes some time...)? y/n : ','s');
+if answ ~= 'y'
+ return
+end
+
+% Also, create test vectors
+
+% Read test file foreman.yuv
+fprintf('Reading test file...')
+[y,u,v]=readYUV420file('../out/Debug/testFiles/foreman_cif.yuv',352,288);
+fprintf(' done\n');
+unew=uint8(zeros(size(u)));
+vnew=uint8(zeros(size(v)));
+
+% traverse all frames
+for k=1:size(y,3)
+ fprintf('Frame %i\n', k);
+ for r=1:size(u,1)
+ for c=1:size(u,2)
+ unew(r,c,k) = uint8(U(double(v(r,c,k))+1, double(u(r,c,k))+1));
+ vnew(r,c,k) = uint8(V(double(v(r,c,k))+1, double(u(r,c,k))+1));
+ end
+ end
+end
+
+fprintf('\nWriting modified test file...')
+writeYUV420file('../out/Debug/foremanColorEnhanced.yuv',y,unew,vnew);
+fprintf(' done\n');
diff --git a/webrtc/modules/video_processing/test/deflickering_test.cc b/webrtc/modules/video_processing/test/deflickering_test.cc
new file mode 100644
index 0000000000..5410015b06
--- /dev/null
+++ b/webrtc/modules/video_processing/test/deflickering_test.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_Deflickering) {
+#else
+TEST_F(VideoProcessingTest, Deflickering) {
+#endif
+ enum { NumRuns = 30 };
+ uint32_t frameNum = 0;
+ const uint32_t frame_rate = 15;
+
+ int64_t min_runtime = 0;
+ int64_t avg_runtime = 0;
+
+ // Close automatically opened Foreman.
+ fclose(source_file_);
+ const std::string input_file =
+ webrtc::test::ResourcePath("deflicker_before_cif_short", "yuv");
+ source_file_ = fopen(input_file.c_str(), "rb");
+ ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file: " << input_file
+ << "\n";
+
+ const std::string output_file =
+ webrtc::test::OutputPath() + "deflicker_output_cif_short.yuv";
+ FILE* deflickerFile = fopen(output_file.c_str(), "wb");
+ ASSERT_TRUE(deflickerFile != NULL)
+ << "Could not open output file: " << output_file << "\n";
+
+ printf("\nRun time [us / frame]:\n");
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++) {
+ TickTime t0;
+ TickTime t1;
+ TickInterval acc_ticks;
+ uint32_t timeStamp = 1;
+
+ frameNum = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ frameNum++;
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
+ height_, 0, kVideoRotation_0, &video_frame_));
+ video_frame_.set_timestamp(timeStamp);
+
+ t0 = TickTime::Now();
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
+ t1 = TickTime::Now();
+ acc_ticks += (t1 - t0);
+
+ if (run_idx == 0) {
+ if (PrintVideoFrame(video_frame_, deflickerFile) < 0) {
+ return;
+ }
+ }
+ timeStamp += (90000 / frame_rate);
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
+ if (acc_ticks.Microseconds() < min_runtime || run_idx == 0) {
+ min_runtime = acc_ticks.Microseconds();
+ }
+ avg_runtime += acc_ticks.Microseconds();
+
+ rewind(source_file_);
+ }
+ ASSERT_EQ(0, fclose(deflickerFile));
+ // TODO(kjellander): Add verification of deflicker output file.
+
+ printf("\nAverage run time = %d us / frame\n",
+ static_cast<int>(avg_runtime / frameNum / NumRuns));
+ printf("Min run time = %d us / frame\n\n",
+ static_cast<int>(min_runtime / frameNum));
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/test/denoiser_test.cc b/webrtc/modules/video_processing/test/denoiser_test.cc
new file mode 100644
index 0000000000..551a77617d
--- /dev/null
+++ b/webrtc/modules/video_processing/test/denoiser_test.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+#include "webrtc/modules/video_processing/video_denoiser.h"
+
+namespace webrtc {
+
+TEST_F(VideoProcessingTest, CopyMem) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t src[16 * 16], dst[16 * 16];
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ src[i * 16 + j] = i * 16 + j;
+ }
+ }
+
+ memset(dst, 0, 8 * 8);
+ df_c->CopyMem8x8(src, 8, dst, 8);
+ EXPECT_EQ(0, memcmp(src, dst, 8 * 8));
+
+ memset(dst, 0, 16 * 16);
+ df_c->CopyMem16x16(src, 16, dst, 16);
+ EXPECT_EQ(0, memcmp(src, dst, 16 * 16));
+
+ memset(dst, 0, 8 * 8);
+ df_sse_neon->CopyMem16x16(src, 8, dst, 8);
+ EXPECT_EQ(0, memcmp(src, dst, 8 * 8));
+
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->CopyMem16x16(src, 16, dst, 16);
+ EXPECT_EQ(0, memcmp(src, dst, 16 * 16));
+}
+
+TEST_F(VideoProcessingTest, Variance) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t src[16 * 16], dst[16 * 16];
+ uint32_t sum = 0, sse = 0, var;
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ src[i * 16 + j] = i * 16 + j;
+ }
+ }
+ // Compute the 16x8 variance of the 16x16 block.
+ for (int i = 0; i < 8; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ sum += (i * 32 + j);
+ sse += (i * 32 + j) * (i * 32 + j);
+ }
+ }
+ var = sse - ((sum * sum) >> 7);
+ memset(dst, 0, 16 * 16);
+ EXPECT_EQ(var, df_c->Variance16x8(src, 16, dst, 16, &sse));
+ EXPECT_EQ(var, df_sse_neon->Variance16x8(src, 16, dst, 16, &sse));
+}
+
+TEST_F(VideoProcessingTest, MbDenoise) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t running_src[16 * 16], src[16 * 16], dst[16 * 16], dst_ref[16 * 16];
+
+ // Test case: |diff| <= |3 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 2;
+ dst_ref[i * 16 + j] = running_src[i * 16 + j];
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| >= |4 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 5;
+ dst_ref[i * 16 + j] = src[i * 16 + j] - 2;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| >= 8
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 8;
+ dst_ref[i * 16 + j] = src[i * 16 + j] - 6;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| > 15
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 16;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ DenoiserDecision decision =
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+ decision = df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+}
+
+TEST_F(VideoProcessingTest, Denoiser) {
+ // Create pure C denoiser.
+ VideoDenoiser denoiser_c(false);
+ // Create SSE or NEON denoiser.
+ VideoDenoiser denoiser_sse_neon(true);
+ VideoFrame denoised_frame_c;
+ VideoFrame denoised_frame_sse_neon;
+
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ // Using ConvertToI420 to add stride to the image.
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+
+ denoiser_c.DenoiseFrame(video_frame_, &denoised_frame_c);
+ denoiser_sse_neon.DenoiseFrame(video_frame_, &denoised_frame_sse_neon);
+
+ // Denoising results should be the same for C and SSE/NEON denoiser.
+ ASSERT_EQ(true, denoised_frame_c.EqualsFrame(denoised_frame_sse_neon));
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/test/readYUV420file.m b/webrtc/modules/video_processing/test/readYUV420file.m
new file mode 100644
index 0000000000..f409820283
--- /dev/null
+++ b/webrtc/modules/video_processing/test/readYUV420file.m
@@ -0,0 +1,45 @@
+function [Y,U,V] = readYUV420file(filename, width, height)
+% [Y,U,V] = readYUVfile(filename, width, height)
+
+fid = fopen(filename,'rb');
+if fid==-1
+ error(['Cannot open file ' filename]);
+end
+
+% Number of pixels per image
+nPx=width*height;
+
+% nPx bytes luminance, nPx/4 bytes U, nPx/4 bytes V
+frameSizeBytes = nPx*1.5;
+
+% calculate number of frames
+fseek(fid,0,'eof'); % move to end of file
+fileLen=ftell(fid); % number of bytes
+fseek(fid,0,'bof'); % rewind to start
+
+% calculate number of frames
+numFrames = floor(fileLen/frameSizeBytes);
+
+Y=uint8(zeros(height,width,numFrames));
+U=uint8(zeros(height/2,width/2,numFrames));
+V=uint8(zeros(height/2,width/2,numFrames));
+
+[X,nBytes]=fread(fid, frameSizeBytes, 'uchar');
+
+for k=1:numFrames
+
+ % Store luminance
+ Y(:,:,k)=uint8(reshape(X(1:nPx), width, height).');
+
+ % Store U channel
+ U(:,:,k)=uint8(reshape(X(nPx + (1:nPx/4)), width/2, height/2).');
+
+ % Store V channel
+ V(:,:,k)=uint8(reshape(X(nPx + nPx/4 + (1:nPx/4)), width/2, height/2).');
+
+ % Read next frame
+ [X,nBytes]=fread(fid, frameSizeBytes, 'uchar');
+end
+
+
+fclose(fid);
diff --git a/webrtc/modules/video_processing/test/video_processing_unittest.cc b/webrtc/modules/video_processing/test/video_processing_unittest.cc
new file mode 100644
index 0000000000..2fd8fb6673
--- /dev/null
+++ b/webrtc/modules/video_processing/test/video_processing_unittest.cc
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+
+#include <gflags/gflags.h>
+
+#include <string>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+namespace {
+
+// Define command line flag 'gen_files' (default value: false).
+DEFINE_bool(gen_files, false, "Output files for visual inspection.");
+
+} // namespace
+
+static void PreprocessFrameAndVerify(const VideoFrame& source,
+ int target_width,
+ int target_height,
+ VideoProcessing* vpm,
+ const VideoFrame* out_frame);
+static void CropFrame(const uint8_t* source_data,
+ int source_width,
+ int source_height,
+ int offset_x,
+ int offset_y,
+ int cropped_width,
+ int cropped_height,
+ VideoFrame* cropped_frame);
+// The |source_data| is cropped and scaled to |target_width| x |target_height|,
+// and then scaled back to the expected cropped size. |expected_psnr| is used to
+// verify basic quality, and is set to be ~0.1/0.05dB lower than actual PSNR
+// verified under the same conditions.
+static void TestSize(const VideoFrame& source_frame,
+ const VideoFrame& cropped_source_frame,
+ int target_width,
+ int target_height,
+ double expected_psnr,
+ VideoProcessing* vpm);
+static bool CompareFrames(const webrtc::VideoFrame& frame1,
+ const webrtc::VideoFrame& frame2);
+static void WriteProcessedFrameForVisualInspection(const VideoFrame& source,
+ const VideoFrame& processed);
+
+VideoProcessingTest::VideoProcessingTest()
+ : vp_(NULL),
+ source_file_(NULL),
+ width_(352),
+ half_width_((width_ + 1) / 2),
+ height_(288),
+ size_y_(width_ * height_),
+ size_uv_(half_width_ * ((height_ + 1) / 2)),
+ frame_length_(CalcBufferSize(kI420, width_, height_)) {}
+
+void VideoProcessingTest::SetUp() {
+ vp_ = VideoProcessing::Create();
+ ASSERT_TRUE(vp_ != NULL);
+
+ ASSERT_EQ(0, video_frame_.CreateEmptyFrame(width_, height_, width_,
+ half_width_, half_width_));
+ // Clear video frame so DrMemory/Valgrind will allow reads of the buffer.
+ memset(video_frame_.buffer(kYPlane), 0, video_frame_.allocated_size(kYPlane));
+ memset(video_frame_.buffer(kUPlane), 0, video_frame_.allocated_size(kUPlane));
+ memset(video_frame_.buffer(kVPlane), 0, video_frame_.allocated_size(kVPlane));
+ const std::string video_file =
+ webrtc::test::ResourcePath("foreman_cif", "yuv");
+ source_file_ = fopen(video_file.c_str(), "rb");
+ ASSERT_TRUE(source_file_ != NULL)
+ << "Cannot read source file: " + video_file + "\n";
+}
+
+void VideoProcessingTest::TearDown() {
+ if (source_file_ != NULL) {
+ ASSERT_EQ(0, fclose(source_file_));
+ }
+ source_file_ = NULL;
+ delete vp_;
+ vp_ = NULL;
+}
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_HandleNullBuffer) {
+#else
+TEST_F(VideoProcessingTest, HandleNullBuffer) {
+#endif
+ // TODO(mikhal/stefan): Do we need this one?
+ VideoProcessing::FrameStats stats;
+ // Video frame with unallocated buffer.
+ VideoFrame videoFrame;
+
+ vp_->GetFrameStats(videoFrame, &stats);
+ EXPECT_EQ(stats.num_pixels, 0u);
+
+ EXPECT_EQ(-1, vp_->Deflickering(&videoFrame, &stats));
+
+ EXPECT_EQ(-3, vp_->BrightnessDetection(videoFrame, stats));
+}
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_HandleBadStats) {
+#else
+TEST_F(VideoProcessingTest, HandleBadStats) {
+#endif
+ VideoProcessing::FrameStats stats;
+ vp_->ClearFrameStats(&stats);
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+
+ EXPECT_EQ(-1, vp_->Deflickering(&video_frame_, &stats));
+
+ EXPECT_EQ(-3, vp_->BrightnessDetection(video_frame_, stats));
+}
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_IdenticalResultsAfterReset) {
+#else
+TEST_F(VideoProcessingTest, IdenticalResultsAfterReset) {
+#endif
+ VideoFrame video_frame2;
+ VideoProcessing::FrameStats stats;
+ // Only testing non-static functions here.
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_EQ(0, video_frame2.CopyFrame(video_frame_));
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
+
+ // Retrieve frame stats again in case Deflickering() has zeroed them.
+ vp_->GetFrameStats(video_frame2, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame2, &stats));
+ EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
+
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ video_frame2.CopyFrame(video_frame_);
+ ASSERT_EQ(0, vp_->BrightnessDetection(video_frame_, stats));
+
+ ASSERT_EQ(0, vp_->BrightnessDetection(video_frame2, stats));
+ EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
+}
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_FrameStats) {
+#else
+TEST_F(VideoProcessingTest, FrameStats) {
+#endif
+ VideoProcessing::FrameStats stats;
+ vp_->ClearFrameStats(&stats);
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+
+ EXPECT_FALSE(vp_->ValidFrameStats(stats));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ EXPECT_TRUE(vp_->ValidFrameStats(stats));
+
+ printf("\nFrameStats\n");
+ printf("mean: %u\nnum_pixels: %u\nsubSamplFactor: %u\nsum: %u\n\n",
+ static_cast<unsigned int>(stats.mean),
+ static_cast<unsigned int>(stats.num_pixels),
+ static_cast<unsigned int>(stats.sub_sampling_factor),
+ static_cast<unsigned int>(stats.sum));
+
+ vp_->ClearFrameStats(&stats);
+ EXPECT_FALSE(vp_->ValidFrameStats(stats));
+}
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_PreprocessorLogic) {
+#else
+TEST_F(VideoProcessingTest, PreprocessorLogic) {
+#endif
+ // Disable temporal sampling (frame dropping).
+ vp_->EnableTemporalDecimation(false);
+ int resolution = 100;
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 15));
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 30));
+ // Disable spatial sampling.
+ vp_->SetInputFrameResampleMode(kNoRescaling);
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 30));
+ VideoFrame* out_frame = NULL;
+ // Set rescaling => output frame != NULL.
+ vp_->SetInputFrameResampleMode(kFastRescaling);
+ PreprocessFrameAndVerify(video_frame_, resolution, resolution, vp_,
+ out_frame);
+ // No rescaling=> output frame = NULL.
+ vp_->SetInputFrameResampleMode(kNoRescaling);
+ EXPECT_TRUE(vp_->PreprocessFrame(video_frame_) != nullptr);
+}
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_Resampler) {
+#else
+TEST_F(VideoProcessingTest, Resampler) {
+#endif
+ enum { NumRuns = 1 };
+
+ int64_t min_runtime = 0;
+ int64_t total_runtime = 0;
+
+ rewind(source_file_);
+ ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file \n";
+
+ // CA not needed here
+ vp_->EnableContentAnalysis(false);
+ // no temporal decimation
+ vp_->EnableTemporalDecimation(false);
+
+ // Reading test frame
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
+ // Using ConvertToI420 to add stride to the image.
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ // Cropped source frame that will contain the expected visible region.
+ VideoFrame cropped_source_frame;
+ cropped_source_frame.CopyFrame(video_frame_);
+
+ for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++) {
+ // Initiate test timer.
+ const TickTime time_start = TickTime::Now();
+
+ // Init the sourceFrame with a timestamp.
+ video_frame_.set_render_time_ms(time_start.MillisecondTimestamp());
+ video_frame_.set_timestamp(time_start.MillisecondTimestamp() * 90);
+
+ // Test scaling to different sizes: source is of |width|/|height| = 352/288.
+ // Pure scaling:
+ TestSize(video_frame_, video_frame_, width_ / 4, height_ / 4, 25.2, vp_);
+ TestSize(video_frame_, video_frame_, width_ / 2, height_ / 2, 28.1, vp_);
+ // No resampling:
+ TestSize(video_frame_, video_frame_, width_, height_, -1, vp_);
+ TestSize(video_frame_, video_frame_, 2 * width_, 2 * height_, 32.2, vp_);
+
+ // Scaling and cropping. The cropped source frame is the largest center
+ // aligned region that can be used from the source while preserving aspect
+ // ratio.
+ CropFrame(video_buffer.get(), width_, height_, 0, 56, 352, 176,
+ &cropped_source_frame);
+ TestSize(video_frame_, cropped_source_frame, 100, 50, 24.0, vp_);
+
+ CropFrame(video_buffer.get(), width_, height_, 0, 30, 352, 225,
+ &cropped_source_frame);
+ TestSize(video_frame_, cropped_source_frame, 400, 256, 31.3, vp_);
+
+ CropFrame(video_buffer.get(), width_, height_, 68, 0, 216, 288,
+ &cropped_source_frame);
+ TestSize(video_frame_, cropped_source_frame, 480, 640, 32.15, vp_);
+
+ CropFrame(video_buffer.get(), width_, height_, 0, 12, 352, 264,
+ &cropped_source_frame);
+ TestSize(video_frame_, cropped_source_frame, 960, 720, 32.2, vp_);
+
+ CropFrame(video_buffer.get(), width_, height_, 0, 44, 352, 198,
+ &cropped_source_frame);
+ TestSize(video_frame_, cropped_source_frame, 1280, 720, 32.15, vp_);
+
+ // Upsampling to odd size.
+ CropFrame(video_buffer.get(), width_, height_, 0, 26, 352, 233,
+ &cropped_source_frame);
+ TestSize(video_frame_, cropped_source_frame, 501, 333, 32.05, vp_);
+ // Downsample to odd size.
+ CropFrame(video_buffer.get(), width_, height_, 0, 34, 352, 219,
+ &cropped_source_frame);
+ TestSize(video_frame_, cropped_source_frame, 281, 175, 29.3, vp_);
+
+ // Stop timer.
+ const int64_t runtime = (TickTime::Now() - time_start).Microseconds();
+ if (runtime < min_runtime || run_idx == 0) {
+ min_runtime = runtime;
+ }
+ total_runtime += runtime;
+ }
+
+ printf("\nAverage run time = %d us / frame\n",
+ static_cast<int>(total_runtime));
+ printf("Min run time = %d us / frame\n\n", static_cast<int>(min_runtime));
+}
+
void PreprocessFrameAndVerify(const VideoFrame& source,
                              int target_width,
                              int target_height,
                              VideoProcessing* vpm,
                              const VideoFrame* out_frame) {
  // Sets the target resolution on |vpm|, preprocesses |source| and verifies
  // the result: when no resizing is needed the original frame must be
  // returned; otherwise the resampled frame must carry the source's
  // timestamps and the requested dimensions.
  //
  // NOTE(review): |out_frame| is a pointer passed BY VALUE, so the
  // assignment below never reaches the caller - callers such as TestSize
  // still see their own (NULL) pointer afterwards. Confirm whether this
  // parameter should be a VideoFrame** or a return value instead.
  ASSERT_EQ(VPM_OK, vpm->SetTargetResolution(target_width, target_height, 30));
  out_frame = vpm->PreprocessFrame(source);
  EXPECT_TRUE(out_frame != nullptr);

  // If no resizing is needed, expect the original frame.
  if (target_width == source.width() && target_height == source.height()) {
    EXPECT_EQ(&source, out_frame);
    return;
  }

  // Verify the resampled frame.
  // (This nullptr check duplicates the one above; kept byte-identical.)
  EXPECT_TRUE(out_frame != NULL);
  EXPECT_EQ(source.render_time_ms(), (out_frame)->render_time_ms());
  EXPECT_EQ(source.timestamp(), (out_frame)->timestamp());
  EXPECT_EQ(target_width, (out_frame)->width());
  EXPECT_EQ(target_height, (out_frame)->height());
}
+
+void CropFrame(const uint8_t* source_data,
+ int source_width,
+ int source_height,
+ int offset_x,
+ int offset_y,
+ int cropped_width,
+ int cropped_height,
+ VideoFrame* cropped_frame) {
+ cropped_frame->CreateEmptyFrame(cropped_width, cropped_height, cropped_width,
+ (cropped_width + 1) / 2,
+ (cropped_width + 1) / 2);
+ EXPECT_EQ(0,
+ ConvertToI420(kI420, source_data, offset_x, offset_y, source_width,
+ source_height, 0, kVideoRotation_0, cropped_frame));
+}
+
void TestSize(const VideoFrame& source_frame,
              const VideoFrame& cropped_source_frame,
              int target_width,
              int target_height,
              double expected_psnr,
              VideoProcessing* vpm) {
  // Scales |source_frame| to |target_width| x |target_height|, scales the
  // result back to the cropped-source size, and checks the round-trip PSNR
  // against |expected_psnr|.
  // Resample source_frame to out_frame.
  VideoFrame* out_frame = NULL;
  vpm->SetInputFrameResampleMode(kBox);
  PreprocessFrameAndVerify(source_frame, target_width, target_height, vpm,
                           out_frame);
  // NOTE(review): PreprocessFrameAndVerify receives |out_frame| by value and
  // cannot update it, so |out_frame| is still NULL here and the early return
  // below always triggers - everything past it (including the PSNR check)
  // appears to be dead code. Confirm and plumb the preprocessed frame back
  // to the caller.
  if (out_frame == NULL)
    return;
  WriteProcessedFrameForVisualInspection(source_frame, *out_frame);

  // Scale |resampled_source_frame| back to the source scale.
  VideoFrame resampled_source_frame;
  resampled_source_frame.CopyFrame(*out_frame);
  PreprocessFrameAndVerify(resampled_source_frame, cropped_source_frame.width(),
                           cropped_source_frame.height(), vpm, out_frame);
  WriteProcessedFrameForVisualInspection(resampled_source_frame, *out_frame);

  // Compute PSNR against the cropped source frame and check expectation.
  double psnr = I420PSNR(&cropped_source_frame, out_frame);
  EXPECT_GT(psnr, expected_psnr);
  printf(
      "PSNR: %f. PSNR is between source of size %d %d, and a modified "
      "source which is scaled down/up to: %d %d, and back to source size \n",
      psnr, source_frame.width(), source_frame.height(), target_width,
      target_height);
}
+
+bool CompareFrames(const webrtc::VideoFrame& frame1,
+ const webrtc::VideoFrame& frame2) {
+ for (int plane = 0; plane < webrtc::kNumOfPlanes; plane++) {
+ webrtc::PlaneType plane_type = static_cast<webrtc::PlaneType>(plane);
+ int allocated_size1 = frame1.allocated_size(plane_type);
+ int allocated_size2 = frame2.allocated_size(plane_type);
+ if (allocated_size1 != allocated_size2)
+ return false;
+ const uint8_t* plane_buffer1 = frame1.buffer(plane_type);
+ const uint8_t* plane_buffer2 = frame2.buffer(plane_type);
+ if (memcmp(plane_buffer1, plane_buffer2, allocated_size1))
+ return false;
+ }
+ return true;
+}
+
+void WriteProcessedFrameForVisualInspection(const VideoFrame& source,
+ const VideoFrame& processed) {
+ // Skip if writing to files is not enabled.
+ if (!FLAGS_gen_files)
+ return;
+ // Write the processed frame to file for visual inspection.
+ std::ostringstream filename;
+ filename << webrtc::test::OutputPath() << "Resampler_from_" << source.width()
+ << "x" << source.height() << "_to_" << processed.width() << "x"
+ << processed.height() << "_30Hz_P420.yuv";
+ std::cout << "Watch " << filename.str() << " and verify that it is okay."
+ << std::endl;
+ FILE* stand_alone_file = fopen(filename.str().c_str(), "wb");
+ if (PrintVideoFrame(processed, stand_alone_file) < 0)
+ std::cerr << "Failed to write: " << filename.str() << std::endl;
+ if (stand_alone_file)
+ fclose(stand_alone_file);
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_processing/test/video_processing_unittest.h b/webrtc/modules/video_processing/test/video_processing_unittest.h
new file mode 100644
index 0000000000..3433c6ca86
--- /dev/null
+++ b/webrtc/modules/video_processing/test/video_processing_unittest.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
// Shared gtest fixture for the video-processing module tests. Per-test
// state (the module, the source file and frame geometry) is set up in the
// .cc file; trace logging is configured once for the whole suite.
class VideoProcessingTest : public ::testing::Test {
 protected:
  VideoProcessingTest();
  virtual void SetUp();
  virtual void TearDown();
  // Routes WebRTC trace output for the entire suite to VPMTrace.txt in the
  // test output directory.
  static void SetUpTestCase() {
    Trace::CreateTrace();
    std::string trace_file = webrtc::test::OutputPath() + "VPMTrace.txt";
    ASSERT_EQ(0, Trace::SetTraceFile(trace_file.c_str()));
  }
  static void TearDownTestCase() { Trace::ReturnTrace(); }
  VideoProcessing* vp_;   // Module under test; presumably created in SetUp() - see .cc.
  FILE* source_file_;     // Raw YUV input file read by the tests.
  VideoFrame video_frame_;  // Scratch frame shared by the tests.
  // Frame geometry, initialized in the constructor (defined in the .cc).
  const int width_;
  const int half_width_;
  const int height_;
  const int size_y_;   // Presumably the luma-plane byte size - confirm in .cc.
  const int size_uv_;  // Presumably one chroma-plane byte size - confirm in .cc.
  const size_t frame_length_;  // Byte length of one full frame in the file.
};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
diff --git a/webrtc/modules/video_processing/test/writeYUV420file.m b/webrtc/modules/video_processing/test/writeYUV420file.m
new file mode 100644
index 0000000000..359445009b
--- /dev/null
+++ b/webrtc/modules/video_processing/test/writeYUV420file.m
@@ -0,0 +1,22 @@
function writeYUV420file(filename, Y, U, V)
% writeYUV420file(filename, Y, U, V)
%
% Writes a sequence of planar YUV frames to a binary file: for each frame k
% the Y plane is written first, then U, then V, one uint8 per sample.
% Y, U and V are 3-D arrays (rows x cols x numFrames); for 4:2:0 data the
% U and V arrays are expected to already be subsampled (this is not checked).

fid = fopen(filename,'wb');
if fid==-1
  error(['Cannot open file ' filename]);
end

numFrames=size(Y,3);

for k=1:numFrames
  % Write luminance. The transpose (.') converts MATLAB's column-major
  % storage into the row-major byte order of the file format.
  fwrite(fid,uint8(Y(:,:,k).'), 'uchar');

  % Write U channel
  fwrite(fid,uint8(U(:,:,k).'), 'uchar');

  % Write V channel
  fwrite(fid,uint8(V(:,:,k).'), 'uchar');
end

fclose(fid);