summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--modules/video_processing/main/interface/video_processing.h53
-rw-r--r--modules/video_processing/main/source/brighten.cc6
-rw-r--r--modules/video_processing/main/source/brighten.h2
-rw-r--r--modules/video_processing/main/source/brightness_detection.cc24
-rw-r--r--modules/video_processing/main/source/brightness_detection.h12
-rw-r--r--modules/video_processing/main/source/color_enhancement.cc8
-rw-r--r--modules/video_processing/main/source/color_enhancement.h2
-rw-r--r--modules/video_processing/main/source/color_enhancement_private.h2
-rw-r--r--modules/video_processing/main/source/content_analysis.cc64
-rw-r--r--modules/video_processing/main/source/content_analysis.h22
-rw-r--r--modules/video_processing/main/source/content_analysis_sse2.cc78
-rw-r--r--modules/video_processing/main/source/deflickering.cc126
-rw-r--r--modules/video_processing/main/source/deflickering.h30
-rw-r--r--modules/video_processing/main/source/denoising.cc38
-rw-r--r--modules/video_processing/main/source/denoising.h12
-rw-r--r--modules/video_processing/main/source/frame_preprocessor.cc25
-rw-r--r--modules/video_processing/main/source/frame_preprocessor.h26
-rw-r--r--modules/video_processing/main/source/spatial_resampler.cc16
-rw-r--r--modules/video_processing/main/source/spatial_resampler.h30
-rw-r--r--modules/video_processing/main/source/video_decimator.cc40
-rw-r--r--modules/video_processing/main/source/video_decimator.h22
-rw-r--r--modules/video_processing/main/source/video_processing_impl.cc46
-rw-r--r--modules/video_processing/main/source/video_processing_impl.h35
-rw-r--r--modules/video_processing/main/test/unit_test/brightness_detection_test.cc18
-rw-r--r--modules/video_processing/main/test/unit_test/color_enhancement_test.cc4
-rw-r--r--modules/video_processing/main/test/unit_test/deflickering_test.cc12
-rw-r--r--modules/video_processing/main/test/unit_test/denoising_test.cc18
-rw-r--r--modules/video_processing/main/test/unit_test/unit_test.cc6
28 files changed, 385 insertions, 392 deletions
diff --git a/modules/video_processing/main/interface/video_processing.h b/modules/video_processing/main/interface/video_processing.h
index 1aaade3c..9effc336 100644
--- a/modules/video_processing/main/interface/video_processing.h
+++ b/modules/video_processing/main/interface/video_processing.h
@@ -54,13 +54,13 @@ public:
memset(hist, 0, sizeof(hist));
}
- WebRtc_UWord32 hist[256]; /**< Histogram of frame */
- WebRtc_UWord32 mean; /**< Mean value of frame */
- WebRtc_UWord32 sum; /**< Sum of frame */
- WebRtc_UWord32 numPixels; /**< Number of pixels */
- WebRtc_UWord8 subSamplWidth; /**< Subsampling rate of width in powers
+ uint32_t hist[256]; /**< Histogram of frame */
+ uint32_t mean; /**< Mean value of frame */
+ uint32_t sum; /**< Sum of frame */
+ uint32_t numPixels; /**< Number of pixels */
+ uint8_t subSamplWidth; /**< Subsampling rate of width in powers
of 2 */
- WebRtc_UWord8 subSamplHeight; /**< Subsampling rate of height in powers
+ uint8_t subSamplHeight; /**< Subsampling rate of height in powers
of 2 */
};
@@ -82,7 +82,7 @@ public:
\return Pointer to a VPM object.
*/
- static VideoProcessingModule* Create(WebRtc_Word32 id);
+ static VideoProcessingModule* Create(int32_t id);
/**
Destroys a VPM object.
@@ -95,12 +95,12 @@ public:
/**
Not supported.
*/
- virtual WebRtc_Word32 TimeUntilNextProcess() { return -1; }
+ virtual int32_t TimeUntilNextProcess() { return -1; }
/**
Not supported.
*/
- virtual WebRtc_Word32 Process() { return -1; }
+ virtual int32_t Process() { return -1; }
/**
Resets all processing components to their initial states. This should be
@@ -120,8 +120,8 @@ public:
\return 0 on success, -1 on failure.
*/
- static WebRtc_Word32 GetFrameStats(FrameStats* stats,
- const I420VideoFrame& frame);
+ static int32_t GetFrameStats(FrameStats* stats,
+ const I420VideoFrame& frame);
/**
Checks the validity of a FrameStats struct. Currently, valid implies only
@@ -149,7 +149,7 @@ public:
\param[in,out] frame
Pointer to the video frame.
*/
- static WebRtc_Word32 ColorEnhancement(I420VideoFrame* frame);
+ static int32_t ColorEnhancement(I420VideoFrame* frame);
/**
Increases/decreases the luminance value.
@@ -163,7 +163,7 @@ public:
\return 0 on success, -1 on failure.
*/
- static WebRtc_Word32 Brighten(I420VideoFrame* frame, int delta);
+ static int32_t Brighten(I420VideoFrame* frame, int delta);
/**
Detects and removes camera flicker from a video stream. Every frame from
@@ -180,8 +180,7 @@ public:
\return 0 on success, -1 on failure.
*/
- virtual WebRtc_Word32 Deflickering(I420VideoFrame* frame,
- FrameStats* stats) = 0;
+ virtual int32_t Deflickering(I420VideoFrame* frame, FrameStats* stats) = 0;
/**
Denoises a video frame. Every frame from the stream should be passed in.
@@ -192,7 +191,7 @@ public:
\return The number of modified pixels on success, -1 on failure.
*/
- virtual WebRtc_Word32 Denoising(I420VideoFrame* frame) = 0;
+ virtual int32_t Denoising(I420VideoFrame* frame) = 0;
/**
Detects if a video frame is excessively bright or dark. Returns a
@@ -207,8 +206,8 @@ public:
\return A member of BrightnessWarning on success, -1 on error
*/
- virtual WebRtc_Word32 BrightnessDetection(const I420VideoFrame& frame,
- const FrameStats& stats) = 0;
+ virtual int32_t BrightnessDetection(const I420VideoFrame& frame,
+ const FrameStats& stats) = 0;
/**
The following functions refer to the pre-processor unit within VPM. The
@@ -238,9 +237,9 @@ public:
\return VPM_OK on success, a negative value on error (see error codes)
*/
- virtual WebRtc_Word32 SetTargetResolution(WebRtc_UWord32 width,
- WebRtc_UWord32 height,
- WebRtc_UWord32 frameRate) = 0;
+ virtual int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frameRate) = 0;
/**
Set max frame rate
@@ -248,22 +247,22 @@ public:
\return VPM_OK on success, a negative value on error (see error codes)
*/
- virtual WebRtc_Word32 SetMaxFrameRate(WebRtc_UWord32 maxFrameRate) = 0;
+ virtual int32_t SetMaxFrameRate(uint32_t maxFrameRate) = 0;
/**
Get decimated(target) frame rate
*/
- virtual WebRtc_UWord32 DecimatedFrameRate() = 0;
+ virtual uint32_t DecimatedFrameRate() = 0;
/**
Get decimated(target) frame width
*/
- virtual WebRtc_UWord32 DecimatedWidth() const = 0;
+ virtual uint32_t DecimatedWidth() const = 0;
/**
Get decimated(target) frame height
*/
- virtual WebRtc_UWord32 DecimatedHeight() const = 0 ;
+ virtual uint32_t DecimatedHeight() const = 0 ;
/**
Set the spatial resampling settings of the VPM: The resampler may either be
@@ -285,8 +284,8 @@ public:
\return VPM_OK on success, a negative value on error (see error codes)
*/
- virtual WebRtc_Word32 PreprocessFrame(const I420VideoFrame& frame,
- I420VideoFrame** processedFrame) = 0;
+ virtual int32_t PreprocessFrame(const I420VideoFrame& frame,
+ I420VideoFrame** processedFrame) = 0;
/**
Return content metrics for the last processed frame
diff --git a/modules/video_processing/main/source/brighten.cc b/modules/video_processing/main/source/brighten.cc
index 4c356e21..dd3d7017 100644
--- a/modules/video_processing/main/source/brighten.cc
+++ b/modules/video_processing/main/source/brighten.cc
@@ -17,7 +17,7 @@
namespace webrtc {
namespace VideoProcessing {
-WebRtc_Word32 Brighten(I420VideoFrame* frame, int delta) {
+int32_t Brighten(I420VideoFrame* frame, int delta) {
assert(frame);
if (frame->IsZeroSize()) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoPreocessing, -1,
@@ -39,10 +39,10 @@ WebRtc_Word32 Brighten(I420VideoFrame* frame, int delta) {
lookUp[i] = ((((val < 0) ? 0 : val) > 255) ? 255 : val);
}
- WebRtc_UWord8* tempPtr = frame->buffer(kYPlane);
+ uint8_t* tempPtr = frame->buffer(kYPlane);
for (int i = 0; i < numPixels; i++) {
- *tempPtr = static_cast<WebRtc_UWord8>(lookUp[*tempPtr]);
+ *tempPtr = static_cast<uint8_t>(lookUp[*tempPtr]);
tempPtr++;
}
return VPM_OK;
diff --git a/modules/video_processing/main/source/brighten.h b/modules/video_processing/main/source/brighten.h
index 2347286f..5dfb4072 100644
--- a/modules/video_processing/main/source/brighten.h
+++ b/modules/video_processing/main/source/brighten.h
@@ -17,7 +17,7 @@
namespace webrtc {
namespace VideoProcessing {
-WebRtc_Word32 Brighten(I420VideoFrame* frame, int delta);
+int32_t Brighten(I420VideoFrame* frame, int delta);
} // namespace VideoProcessing
} // namespace webrtc
diff --git a/modules/video_processing/main/source/brightness_detection.cc b/modules/video_processing/main/source/brightness_detection.cc
index a6d9c3d2..fc4a4bab 100644
--- a/modules/video_processing/main/source/brightness_detection.cc
+++ b/modules/video_processing/main/source/brightness_detection.cc
@@ -26,8 +26,8 @@ VPMBrightnessDetection::~VPMBrightnessDetection()
{
}
-WebRtc_Word32
-VPMBrightnessDetection::ChangeUniqueId(const WebRtc_Word32 id)
+int32_t
+VPMBrightnessDetection::ChangeUniqueId(const int32_t id)
{
_id = id;
return VPM_OK;
@@ -40,7 +40,7 @@ VPMBrightnessDetection::Reset()
_frameCntDark = 0;
}
-WebRtc_Word32
+int32_t
VPMBrightnessDetection::ProcessFrame(const I420VideoFrame& frame,
const VideoProcessingModule::FrameStats&
stats)
@@ -61,12 +61,12 @@ VPMBrightnessDetection::ProcessFrame(const I420VideoFrame& frame,
return VPM_PARAMETER_ERROR;
}
- const WebRtc_UWord8 frameCntAlarm = 2;
+ const uint8_t frameCntAlarm = 2;
// Get proportion in lowest bins
- WebRtc_UWord8 lowTh = 20;
+ uint8_t lowTh = 20;
float propLow = 0;
- for (WebRtc_UWord32 i = 0; i < lowTh; i++)
+ for (uint32_t i = 0; i < lowTh; i++)
{
propLow += stats.hist[i];
}
@@ -75,7 +75,7 @@ VPMBrightnessDetection::ProcessFrame(const I420VideoFrame& frame,
// Get proportion in highest bins
unsigned char highTh = 230;
float propHigh = 0;
- for (WebRtc_UWord32 i = highTh; i < 256; i++)
+ for (uint32_t i = highTh; i < 256; i++)
{
propHigh += stats.hist[i];
}
@@ -100,14 +100,14 @@ VPMBrightnessDetection::ProcessFrame(const I420VideoFrame& frame,
stdY = sqrt(stdY / stats.numPixels);
// Get percentiles
- WebRtc_UWord32 sum = 0;
- WebRtc_UWord32 medianY = 140;
- WebRtc_UWord32 perc05 = 0;
- WebRtc_UWord32 perc95 = 255;
+ uint32_t sum = 0;
+ uint32_t medianY = 140;
+ uint32_t perc05 = 0;
+ uint32_t perc95 = 255;
float posPerc05 = stats.numPixels * 0.05f;
float posMedian = stats.numPixels * 0.5f;
float posPerc95 = stats.numPixels * 0.95f;
- for (WebRtc_UWord32 i = 0; i < 256; i++)
+ for (uint32_t i = 0; i < 256; i++)
{
sum += stats.hist[i];
diff --git a/modules/video_processing/main/source/brightness_detection.h b/modules/video_processing/main/source/brightness_detection.h
index 3ea41189..4d0f3a7c 100644
--- a/modules/video_processing/main/source/brightness_detection.h
+++ b/modules/video_processing/main/source/brightness_detection.h
@@ -25,18 +25,18 @@ public:
VPMBrightnessDetection();
~VPMBrightnessDetection();
- WebRtc_Word32 ChangeUniqueId(WebRtc_Word32 id);
+ int32_t ChangeUniqueId(int32_t id);
void Reset();
- WebRtc_Word32 ProcessFrame(const I420VideoFrame& frame,
- const VideoProcessingModule::FrameStats& stats);
+ int32_t ProcessFrame(const I420VideoFrame& frame,
+ const VideoProcessingModule::FrameStats& stats);
private:
- WebRtc_Word32 _id;
+ int32_t _id;
- WebRtc_UWord32 _frameCntBright;
- WebRtc_UWord32 _frameCntDark;
+ uint32_t _frameCntBright;
+ uint32_t _frameCntDark;
};
} //namespace
diff --git a/modules/video_processing/main/source/color_enhancement.cc b/modules/video_processing/main/source/color_enhancement.cc
index a2feb98f..918fec76 100644
--- a/modules/video_processing/main/source/color_enhancement.cc
+++ b/modules/video_processing/main/source/color_enhancement.cc
@@ -17,14 +17,14 @@ namespace webrtc {
namespace VideoProcessing
{
- WebRtc_Word32
+ int32_t
ColorEnhancement(I420VideoFrame* frame)
{
assert(frame);
// pointers to U and V color pixels
- WebRtc_UWord8* ptrU;
- WebRtc_UWord8* ptrV;
- WebRtc_UWord8 tempChroma;
+ uint8_t* ptrU;
+ uint8_t* ptrV;
+ uint8_t tempChroma;
if (frame->IsZeroSize())
{
diff --git a/modules/video_processing/main/source/color_enhancement.h b/modules/video_processing/main/source/color_enhancement.h
index 67ba1727..33381d0b 100644
--- a/modules/video_processing/main/source/color_enhancement.h
+++ b/modules/video_processing/main/source/color_enhancement.h
@@ -21,7 +21,7 @@ namespace webrtc {
namespace VideoProcessing
{
- WebRtc_Word32 ColorEnhancement(I420VideoFrame* frame);
+ int32_t ColorEnhancement(I420VideoFrame* frame);
}
} //namespace
diff --git a/modules/video_processing/main/source/color_enhancement_private.h b/modules/video_processing/main/source/color_enhancement_private.h
index b88fc1a9..79fde159 100644
--- a/modules/video_processing/main/source/color_enhancement_private.h
+++ b/modules/video_processing/main/source/color_enhancement_private.h
@@ -9,7 +9,7 @@ namespace webrtc {
//Usage:
// Umod=colorTable[U][V]
// Vmod=colorTable[V][U]
-static const WebRtc_UWord8 colorTable[256][256] = {
+static const uint8_t colorTable[256][256] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
diff --git a/modules/video_processing/main/source/content_analysis.cc b/modules/video_processing/main/source/content_analysis.cc
index 18fc4a04..c8f3c833 100644
--- a/modules/video_processing/main/source/content_analysis.cc
+++ b/modules/video_processing/main/source/content_analysis.cc
@@ -90,7 +90,7 @@ VPMContentAnalysis::ComputeContentMetrics(const I420VideoFrame& inputFrame)
return ContentMetrics();
}
-WebRtc_Word32
+int32_t
VPMContentAnalysis::Release()
{
if (_cMetrics != NULL)
@@ -112,7 +112,7 @@ VPMContentAnalysis::Release()
return VPM_OK;
}
-WebRtc_Word32
+int32_t
VPMContentAnalysis::Initialize(int width, int height)
{
_width = width;
@@ -158,7 +158,7 @@ VPMContentAnalysis::Initialize(int width, int height)
return VPM_MEMORY;
}
- _prevFrame = new WebRtc_UWord8[_width * _height] ; // Y only
+ _prevFrame = new uint8_t[_width * _height] ; // Y only
if (_prevFrame == NULL)
{
return VPM_MEMORY;
@@ -170,7 +170,7 @@ VPMContentAnalysis::Initialize(int width, int height)
// Compute motion metrics: magnitude over non-zero motion vectors,
// and size of zero cluster
-WebRtc_Word32
+int32_t
VPMContentAnalysis::ComputeMotionMetrics()
{
@@ -185,18 +185,18 @@ VPMContentAnalysis::ComputeMotionMetrics()
// Normalize MAD by spatial contrast: images with more contrast
// (pixel variance) likely have larger temporal difference
// To reduce complexity, we compute the metric for a reduced set of points.
-WebRtc_Word32
+int32_t
VPMContentAnalysis::TemporalDiffMetric_C()
{
// size of original frame
int sizei = _height;
int sizej = _width;
- WebRtc_UWord32 tempDiffSum = 0;
- WebRtc_UWord32 pixelSum = 0;
- WebRtc_UWord64 pixelSqSum = 0;
+ uint32_t tempDiffSum = 0;
+ uint32_t pixelSum = 0;
+ uint64_t pixelSqSum = 0;
- WebRtc_UWord32 numPixels = 0; // counter for # of pixels
+ uint32_t numPixels = 0; // counter for # of pixels
const int width_end = ((_width - 2*_border) & -16) + _border;
@@ -207,13 +207,13 @@ VPMContentAnalysis::TemporalDiffMetric_C()
numPixels += 1;
int ssn = i * sizej + j;
- WebRtc_UWord8 currPixel = _origFrame[ssn];
- WebRtc_UWord8 prevPixel = _prevFrame[ssn];
+ uint8_t currPixel = _origFrame[ssn];
+ uint8_t prevPixel = _prevFrame[ssn];
- tempDiffSum += (WebRtc_UWord32)
- abs((WebRtc_Word16)(currPixel - prevPixel));
- pixelSum += (WebRtc_UWord32) currPixel;
- pixelSqSum += (WebRtc_UWord64) (currPixel * currPixel);
+ tempDiffSum += (uint32_t)
+ abs((int16_t)(currPixel - prevPixel));
+ pixelSum += (uint32_t) currPixel;
+ pixelSqSum += (uint64_t) (currPixel * currPixel);
}
}
@@ -248,7 +248,7 @@ VPMContentAnalysis::TemporalDiffMetric_C()
// The metrics are a simple estimate of the up-sampling prediction error,
// estimated assuming sub-sampling for decimation (no filtering),
// and up-sampling back up with simple bilinear interpolation.
-WebRtc_Word32
+int32_t
VPMContentAnalysis::ComputeSpatialMetrics_C()
{
//size of original frame
@@ -256,11 +256,11 @@ VPMContentAnalysis::ComputeSpatialMetrics_C()
const int sizej = _width;
// pixel mean square average: used to normalize the spatial metrics
- WebRtc_UWord32 pixelMSA = 0;
+ uint32_t pixelMSA = 0;
- WebRtc_UWord32 spatialErrSum = 0;
- WebRtc_UWord32 spatialErrVSum = 0;
- WebRtc_UWord32 spatialErrHSum = 0;
+ uint32_t spatialErrSum = 0;
+ uint32_t spatialErrVSum = 0;
+ uint32_t spatialErrHSum = 0;
// make sure work section is a multiple of 16
const int width_end = ((sizej - 2*_border) & -16) + _border;
@@ -276,21 +276,21 @@ VPMContentAnalysis::ComputeSpatialMetrics_C()
int ssn4 = i * sizej + j + 1; // right
int ssn5 = i * sizej + j - 1; // left
- WebRtc_UWord16 refPixel1 = _origFrame[ssn1] << 1;
- WebRtc_UWord16 refPixel2 = _origFrame[ssn1] << 2;
+ uint16_t refPixel1 = _origFrame[ssn1] << 1;
+ uint16_t refPixel2 = _origFrame[ssn1] << 2;
- WebRtc_UWord8 bottPixel = _origFrame[ssn2];
- WebRtc_UWord8 topPixel = _origFrame[ssn3];
- WebRtc_UWord8 rightPixel = _origFrame[ssn4];
- WebRtc_UWord8 leftPixel = _origFrame[ssn5];
+ uint8_t bottPixel = _origFrame[ssn2];
+ uint8_t topPixel = _origFrame[ssn3];
+ uint8_t rightPixel = _origFrame[ssn4];
+ uint8_t leftPixel = _origFrame[ssn5];
- spatialErrSum += (WebRtc_UWord32) abs((WebRtc_Word16)(refPixel2
- - (WebRtc_UWord16)(bottPixel + topPixel
+ spatialErrSum += (uint32_t) abs((int16_t)(refPixel2
+ - (uint16_t)(bottPixel + topPixel
+ leftPixel + rightPixel)));
- spatialErrVSum += (WebRtc_UWord32) abs((WebRtc_Word16)(refPixel1
- - (WebRtc_UWord16)(bottPixel + topPixel)));
- spatialErrHSum += (WebRtc_UWord32) abs((WebRtc_Word16)(refPixel1
- - (WebRtc_UWord16)(leftPixel + rightPixel)));
+ spatialErrVSum += (uint32_t) abs((int16_t)(refPixel1
+ - (uint16_t)(bottPixel + topPixel)));
+ spatialErrHSum += (uint32_t) abs((int16_t)(refPixel1
+ - (uint16_t)(leftPixel + rightPixel)));
pixelMSA += _origFrame[ssn1];
}
diff --git a/modules/video_processing/main/source/content_analysis.h b/modules/video_processing/main/source/content_analysis.h
index 385674cb..2322e407 100644
--- a/modules/video_processing/main/source/content_analysis.h
+++ b/modules/video_processing/main/source/content_analysis.h
@@ -30,7 +30,7 @@ public:
// extractContentFeature
// Inputs: width, height
// Return value: 0 if OK, negative value upon error
- WebRtc_Word32 Initialize(int width, int height);
+ int32_t Initialize(int width, int height);
// Extract content Feature - main function of ContentAnalysis
// Input: new frame
@@ -41,7 +41,7 @@ public:
// Release all allocated memory
// Output: 0 if OK, negative value upon error
- WebRtc_Word32 Release();
+ int32_t Release();
private:
@@ -49,26 +49,26 @@ private:
VideoContentMetrics* ContentMetrics();
// Normalized temporal difference metric: for motion magnitude
- typedef WebRtc_Word32 (VPMContentAnalysis::*TemporalDiffMetricFunc)();
+ typedef int32_t (VPMContentAnalysis::*TemporalDiffMetricFunc)();
TemporalDiffMetricFunc TemporalDiffMetric;
- WebRtc_Word32 TemporalDiffMetric_C();
+ int32_t TemporalDiffMetric_C();
// Motion metric method: call 2 metrics (magnitude and size)
- WebRtc_Word32 ComputeMotionMetrics();
+ int32_t ComputeMotionMetrics();
// Spatial metric method: computes the 3 frame-average spatial
// prediction errors (1x2,2x1,2x2)
- typedef WebRtc_Word32 (VPMContentAnalysis::*ComputeSpatialMetricsFunc)();
+ typedef int32_t (VPMContentAnalysis::*ComputeSpatialMetricsFunc)();
ComputeSpatialMetricsFunc ComputeSpatialMetrics;
- WebRtc_Word32 ComputeSpatialMetrics_C();
+ int32_t ComputeSpatialMetrics_C();
#if defined(WEBRTC_ARCH_X86_FAMILY)
- WebRtc_Word32 ComputeSpatialMetrics_SSE2();
- WebRtc_Word32 TemporalDiffMetric_SSE2();
+ int32_t ComputeSpatialMetrics_SSE2();
+ int32_t TemporalDiffMetric_SSE2();
#endif
- const WebRtc_UWord8* _origFrame;
- WebRtc_UWord8* _prevFrame;
+ const uint8_t* _origFrame;
+ uint8_t* _prevFrame;
int _width;
int _height;
int _skipNum;
diff --git a/modules/video_processing/main/source/content_analysis_sse2.cc b/modules/video_processing/main/source/content_analysis_sse2.cc
index f505850d..8214bf9d 100644
--- a/modules/video_processing/main/source/content_analysis_sse2.cc
+++ b/modules/video_processing/main/source/content_analysis_sse2.cc
@@ -15,27 +15,27 @@
namespace webrtc {
-WebRtc_Word32
+int32_t
VPMContentAnalysis::TemporalDiffMetric_SSE2()
{
- WebRtc_UWord32 numPixels = 0; // counter for # of pixels
+ uint32_t numPixels = 0; // counter for # of pixels
- const WebRtc_UWord8* imgBufO = _origFrame + _border*_width + _border;
- const WebRtc_UWord8* imgBufP = _prevFrame + _border*_width + _border;
+ const uint8_t* imgBufO = _origFrame + _border*_width + _border;
+ const uint8_t* imgBufP = _prevFrame + _border*_width + _border;
- const WebRtc_Word32 width_end = ((_width - 2*_border) & -16) + _border;
+ const int32_t width_end = ((_width - 2*_border) & -16) + _border;
__m128i sad_64 = _mm_setzero_si128();
__m128i sum_64 = _mm_setzero_si128();
__m128i sqsum_64 = _mm_setzero_si128();
const __m128i z = _mm_setzero_si128();
- for(WebRtc_UWord16 i = 0; i < (_height - 2*_border); i += _skipNum)
+ for(uint16_t i = 0; i < (_height - 2*_border); i += _skipNum)
{
__m128i sqsum_32 = _mm_setzero_si128();
- const WebRtc_UWord8 *lineO = imgBufO;
- const WebRtc_UWord8 *lineP = imgBufP;
+ const uint8_t *lineO = imgBufO;
+ const uint8_t *lineP = imgBufP;
// Work on 16 pixels at a time. For HD content with a width of 1920
// this loop will run ~67 times (depending on border). Maximum for
@@ -45,7 +45,7 @@ VPMContentAnalysis::TemporalDiffMetric_SSE2()
// o*o will have a maximum of 255*255 = 65025. This will roll over
// a 16 bit accumulator as 67*65025 > 65535, but will fit in a
// 32 bit accumulator.
- for(WebRtc_UWord16 j = 0; j < width_end - _border; j += 16)
+ for(uint16_t j = 0; j < width_end - _border; j += 16)
{
const __m128i o = _mm_loadu_si128((__m128i*)(lineO));
const __m128i p = _mm_loadu_si128((__m128i*)(lineP));
@@ -90,16 +90,16 @@ VPMContentAnalysis::TemporalDiffMetric_SSE2()
_mm_store_si128 (&sum_final_128, sum_64);
_mm_store_si128 (&sqsum_final_128, sqsum_64);
- WebRtc_UWord64 *sad_final_64 =
- reinterpret_cast<WebRtc_UWord64*>(&sad_final_128);
- WebRtc_UWord64 *sum_final_64 =
- reinterpret_cast<WebRtc_UWord64*>(&sum_final_128);
- WebRtc_UWord64 *sqsum_final_64 =
- reinterpret_cast<WebRtc_UWord64*>(&sqsum_final_128);
+ uint64_t *sad_final_64 =
+ reinterpret_cast<uint64_t*>(&sad_final_128);
+ uint64_t *sum_final_64 =
+ reinterpret_cast<uint64_t*>(&sum_final_128);
+ uint64_t *sqsum_final_64 =
+ reinterpret_cast<uint64_t*>(&sqsum_final_128);
- const WebRtc_UWord32 pixelSum = sum_final_64[0] + sum_final_64[1];
- const WebRtc_UWord64 pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
- const WebRtc_UWord32 tempDiffSum = sad_final_64[0] + sad_final_64[1];
+ const uint32_t pixelSum = sum_final_64[0] + sum_final_64[1];
+ const uint64_t pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
+ const uint32_t tempDiffSum = sad_final_64[0] + sad_final_64[1];
// default
_motionMagnitude = 0.0f;
@@ -124,11 +124,11 @@ VPMContentAnalysis::TemporalDiffMetric_SSE2()
return VPM_OK;
}
-WebRtc_Word32
+int32_t
VPMContentAnalysis::ComputeSpatialMetrics_SSE2()
{
- const WebRtc_UWord8* imgBuf = _origFrame + _border*_width;
- const WebRtc_Word32 width_end = ((_width - 2*_border) & -16) + _border;
+ const uint8_t* imgBuf = _origFrame + _border*_width;
+ const int32_t width_end = ((_width - 2*_border) & -16) + _border;
__m128i se_32 = _mm_setzero_si128();
__m128i sev_32 = _mm_setzero_si128();
@@ -141,7 +141,7 @@ VPMContentAnalysis::ComputeSpatialMetrics_SSE2()
// value is maxed out at 65529 for every row, 65529*1080 = 70777800, which
// will not roll over a 32 bit accumulator.
// _skipNum is also used to reduce the number of rows
- for(WebRtc_Word32 i = 0; i < (_height - 2*_border); i += _skipNum)
+ for(int32_t i = 0; i < (_height - 2*_border); i += _skipNum)
{
__m128i se_16 = _mm_setzero_si128();
__m128i sev_16 = _mm_setzero_si128();
@@ -158,11 +158,11 @@ VPMContentAnalysis::ComputeSpatialMetrics_SSE2()
// _border could also be adjusted to concentrate on just the center of
// the images for an HD capture in order to reduce the possiblity of
// rollover.
- const WebRtc_UWord8 *lineTop = imgBuf - _width + _border;
- const WebRtc_UWord8 *lineCen = imgBuf + _border;
- const WebRtc_UWord8 *lineBot = imgBuf + _width + _border;
+ const uint8_t *lineTop = imgBuf - _width + _border;
+ const uint8_t *lineCen = imgBuf + _border;
+ const uint8_t *lineBot = imgBuf + _width + _border;
- for(WebRtc_Word32 j = 0; j < width_end - _border; j += 16)
+ for(int32_t j = 0; j < width_end - _border; j += 16)
{
const __m128i t = _mm_loadu_si128((__m128i*)(lineTop));
const __m128i l = _mm_loadu_si128((__m128i*)(lineCen - 1));
@@ -265,19 +265,19 @@ VPMContentAnalysis::ComputeSpatialMetrics_SSE2()
_mm_add_epi64(_mm_unpackhi_epi32(msa_32,z),
_mm_unpacklo_epi32(msa_32,z)));
- WebRtc_UWord64 *se_64 =
- reinterpret_cast<WebRtc_UWord64*>(&se_128);
- WebRtc_UWord64 *sev_64 =
- reinterpret_cast<WebRtc_UWord64*>(&sev_128);
- WebRtc_UWord64 *seh_64 =
- reinterpret_cast<WebRtc_UWord64*>(&seh_128);
- WebRtc_UWord64 *msa_64 =
- reinterpret_cast<WebRtc_UWord64*>(&msa_128);
-
- const WebRtc_UWord32 spatialErrSum = se_64[0] + se_64[1];
- const WebRtc_UWord32 spatialErrVSum = sev_64[0] + sev_64[1];
- const WebRtc_UWord32 spatialErrHSum = seh_64[0] + seh_64[1];
- const WebRtc_UWord32 pixelMSA = msa_64[0] + msa_64[1];
+ uint64_t *se_64 =
+ reinterpret_cast<uint64_t*>(&se_128);
+ uint64_t *sev_64 =
+ reinterpret_cast<uint64_t*>(&sev_128);
+ uint64_t *seh_64 =
+ reinterpret_cast<uint64_t*>(&seh_128);
+ uint64_t *msa_64 =
+ reinterpret_cast<uint64_t*>(&msa_128);
+
+ const uint32_t spatialErrSum = se_64[0] + se_64[1];
+ const uint32_t spatialErrVSum = sev_64[0] + sev_64[1];
+ const uint32_t spatialErrHSum = seh_64[0] + seh_64[1];
+ const uint32_t pixelMSA = msa_64[0] + msa_64[1];
// normalize over all pixels
const float spatialErr = (float)(spatialErrSum >> 2);
diff --git a/modules/video_processing/main/source/deflickering.cc b/modules/video_processing/main/source/deflickering.cc
index aaf30d7b..4bd617b9 100644
--- a/modules/video_processing/main/source/deflickering.cc
+++ b/modules/video_processing/main/source/deflickering.cc
@@ -36,14 +36,14 @@ enum { kLog2OfDownsamplingFactor = 3 };
// >> fprintf('%d, ', probUW16)
// Resolution reduced to avoid overflow when multiplying with the (potentially) large
// number of pixels.
-const WebRtc_UWord16 VPMDeflickering::_probUW16[kNumProbs] =
+const uint16_t VPMDeflickering::_probUW16[kNumProbs] =
{102, 205, 410, 614, 819, 1024, 1229, 1434, 1638, 1843, 1946, 1987}; // <Q11>
// To generate in Matlab:
// >> numQuants = 14; maxOnlyLength = 5;
// >> weightUW16 = round(2^15 * [linspace(0.5, 1.0, numQuants - maxOnlyLength)]);
// >> fprintf('%d, %d,\n ', weightUW16);
-const WebRtc_UWord16 VPMDeflickering::_weightUW16[kNumQuants - kMaxOnlyLength] =
+const uint16_t VPMDeflickering::_weightUW16[kNumQuants - kMaxOnlyLength] =
{16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768}; // <Q15>
VPMDeflickering::VPMDeflickering() :
@@ -56,8 +56,8 @@ VPMDeflickering::~VPMDeflickering()
{
}
-WebRtc_Word32
-VPMDeflickering::ChangeUniqueId(const WebRtc_Word32 id)
+int32_t
+VPMDeflickering::ChangeUniqueId(const int32_t id)
{
_id = id;
return 0;
@@ -70,39 +70,39 @@ VPMDeflickering::Reset()
_detectionState = 0;
_frameRate = 0;
- memset(_meanBuffer, 0, sizeof(WebRtc_Word32) * kMeanBufferLength);
- memset(_timestampBuffer, 0, sizeof(WebRtc_Word32) * kMeanBufferLength);
+ memset(_meanBuffer, 0, sizeof(int32_t) * kMeanBufferLength);
+ memset(_timestampBuffer, 0, sizeof(int32_t) * kMeanBufferLength);
// Initialize the history with a uniformly distributed histogram
_quantHistUW8[0][0] = 0;
_quantHistUW8[0][kNumQuants - 1] = 255;
- for (WebRtc_Word32 i = 0; i < kNumProbs; i++)
+ for (int32_t i = 0; i < kNumProbs; i++)
{
- _quantHistUW8[0][i + 1] = static_cast<WebRtc_UWord8>((WEBRTC_SPL_UMUL_16_16(
+ _quantHistUW8[0][i + 1] = static_cast<uint8_t>((WEBRTC_SPL_UMUL_16_16(
_probUW16[i], 255) + (1 << 10)) >> 11); // Unsigned round. <Q0>
}
- for (WebRtc_Word32 i = 1; i < kFrameHistorySize; i++)
+ for (int32_t i = 1; i < kFrameHistorySize; i++)
{
- memcpy(_quantHistUW8[i], _quantHistUW8[0], sizeof(WebRtc_UWord8) * kNumQuants);
+ memcpy(_quantHistUW8[i], _quantHistUW8[0], sizeof(uint8_t) * kNumQuants);
}
}
-WebRtc_Word32
+int32_t
VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
VideoProcessingModule::FrameStats* stats)
{
assert(frame);
- WebRtc_UWord32 frameMemory;
- WebRtc_UWord8 quantUW8[kNumQuants];
- WebRtc_UWord8 maxQuantUW8[kNumQuants];
- WebRtc_UWord8 minQuantUW8[kNumQuants];
- WebRtc_UWord16 targetQuantUW16[kNumQuants];
- WebRtc_UWord16 incrementUW16;
- WebRtc_UWord8 mapUW8[256];
-
- WebRtc_UWord16 tmpUW16;
- WebRtc_UWord32 tmpUW32;
+ uint32_t frameMemory;
+ uint8_t quantUW8[kNumQuants];
+ uint8_t maxQuantUW8[kNumQuants];
+ uint8_t minQuantUW8[kNumQuants];
+ uint16_t targetQuantUW16[kNumQuants];
+ uint16_t incrementUW16;
+ uint8_t mapUW8[256];
+
+ uint16_t tmpUW16;
+ uint32_t tmpUW32;
int width = frame->width();
int height = frame->height();
@@ -134,7 +134,7 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
}
// Flicker detection
- WebRtc_Word32 detFlicker = DetectFlicker();
+ int32_t detFlicker = DetectFlicker();
if (detFlicker < 0)
{ // Error
return VPM_GENERAL_ERROR;
@@ -145,12 +145,12 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
}
// Size of luminance component
- const WebRtc_UWord32 ySize = height * width;
+ const uint32_t ySize = height * width;
- const WebRtc_UWord32 ySubSize = width * (((height - 1) >>
+ const uint32_t ySubSize = width * (((height - 1) >>
kLog2OfDownsamplingFactor) + 1);
- WebRtc_UWord8* ySorted = new WebRtc_UWord8[ySubSize];
- WebRtc_UWord32 sortRowIdx = 0;
+ uint8_t* ySorted = new uint8_t[ySubSize];
+ uint32_t sortRowIdx = 0;
for (int i = 0; i < height; i += kDownsamplingFactor)
{
memcpy(ySorted + sortRowIdx * width,
@@ -160,7 +160,7 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
webrtc::Sort(ySorted, ySubSize, webrtc::TYPE_UWord8);
- WebRtc_UWord32 probIdxUW32 = 0;
+ uint32_t probIdxUW32 = 0;
quantUW8[0] = 0;
quantUW8[kNumQuants - 1] = 255;
@@ -173,7 +173,7 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
return -1;
}
- for (WebRtc_Word32 i = 0; i < kNumProbs; i++)
+ for (int32_t i = 0; i < kNumProbs; i++)
{
probIdxUW32 = WEBRTC_SPL_UMUL_32_16(ySubSize, _probUW16[i]) >> 11; // <Q0>
quantUW8[i + 1] = ySorted[probIdxUW32];
@@ -184,9 +184,9 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
// Shift history for new frame.
memmove(_quantHistUW8[1], _quantHistUW8[0], (kFrameHistorySize - 1) * kNumQuants *
- sizeof(WebRtc_UWord8));
+ sizeof(uint8_t));
// Store current frame in history.
- memcpy(_quantHistUW8[0], quantUW8, kNumQuants * sizeof(WebRtc_UWord8));
+ memcpy(_quantHistUW8[0], quantUW8, kNumQuants * sizeof(uint8_t));
// We use a frame memory equal to the ceiling of half the frame rate to ensure we
// capture an entire period of flicker.
@@ -198,11 +198,11 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
}
// Get maximum and minimum.
- for (WebRtc_Word32 i = 0; i < kNumQuants; i++)
+ for (int32_t i = 0; i < kNumQuants; i++)
{
maxQuantUW8[i] = 0;
minQuantUW8[i] = 255;
- for (WebRtc_UWord32 j = 0; j < frameMemory; j++)
+ for (uint32_t j = 0; j < frameMemory; j++)
{
if (_quantHistUW8[j][i] > maxQuantUW8[i])
{
@@ -217,30 +217,30 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
}
// Get target quantiles.
- for (WebRtc_Word32 i = 0; i < kNumQuants - kMaxOnlyLength; i++)
+ for (int32_t i = 0; i < kNumQuants - kMaxOnlyLength; i++)
{
- targetQuantUW16[i] = static_cast<WebRtc_UWord16>((WEBRTC_SPL_UMUL_16_16(
+ targetQuantUW16[i] = static_cast<uint16_t>((WEBRTC_SPL_UMUL_16_16(
_weightUW16[i], maxQuantUW8[i]) + WEBRTC_SPL_UMUL_16_16((1 << 15) -
_weightUW16[i], minQuantUW8[i])) >> 8); // <Q7>
}
- for (WebRtc_Word32 i = kNumQuants - kMaxOnlyLength; i < kNumQuants; i++)
+ for (int32_t i = kNumQuants - kMaxOnlyLength; i < kNumQuants; i++)
{
- targetQuantUW16[i] = ((WebRtc_UWord16)maxQuantUW8[i]) << 7;
+ targetQuantUW16[i] = ((uint16_t)maxQuantUW8[i]) << 7;
}
// Compute the map from input to output pixels.
- WebRtc_UWord16 mapUW16; // <Q7>
- for (WebRtc_Word32 i = 1; i < kNumQuants; i++)
+ uint16_t mapUW16; // <Q7>
+ for (int32_t i = 1; i < kNumQuants; i++)
{
// As quant and targetQuant are limited to UWord8, we're safe to use Q7 here.
- tmpUW32 = static_cast<WebRtc_UWord32>(targetQuantUW16[i] -
+ tmpUW32 = static_cast<uint32_t>(targetQuantUW16[i] -
targetQuantUW16[i - 1]); // <Q7>
- tmpUW16 = static_cast<WebRtc_UWord16>(quantUW8[i] - quantUW8[i - 1]); // <Q0>
+ tmpUW16 = static_cast<uint16_t>(quantUW8[i] - quantUW8[i - 1]); // <Q0>
if (tmpUW16 > 0)
{
- incrementUW16 = static_cast<WebRtc_UWord16>(WebRtcSpl_DivU32U16(tmpUW32,
+ incrementUW16 = static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmpUW32,
tmpUW16)); // <Q7>
}
else
@@ -250,16 +250,16 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
}
mapUW16 = targetQuantUW16[i - 1];
- for (WebRtc_UWord32 j = quantUW8[i - 1]; j < (WebRtc_UWord32)(quantUW8[i] + 1); j++)
+ for (uint32_t j = quantUW8[i - 1]; j < (uint32_t)(quantUW8[i] + 1); j++)
{
- mapUW8[j] = (WebRtc_UWord8)((mapUW16 + (1 << 6)) >> 7); // Unsigned round. <Q0>
+ mapUW8[j] = (uint8_t)((mapUW16 + (1 << 6)) >> 7); // Unsigned round. <Q0>
mapUW16 += incrementUW16;
}
}
// Map to the output frame.
uint8_t* buffer = frame->buffer(kYPlane);
- for (WebRtc_UWord32 i = 0; i < ySize; i++)
+ for (uint32_t i = 0; i < ySize; i++)
{
buffer[i] = mapUW8[buffer[i]];
}
@@ -282,26 +282,26 @@ VPMDeflickering::ProcessFrame(I420VideoFrame* frame,
zero.\n
-1: Error
*/
-WebRtc_Word32
-VPMDeflickering::PreDetection(const WebRtc_UWord32 timestamp,
+int32_t
+VPMDeflickering::PreDetection(const uint32_t timestamp,
const VideoProcessingModule::FrameStats& stats)
{
- WebRtc_Word32 meanVal; // Mean value of frame (Q4)
- WebRtc_UWord32 frameRate = 0;
- WebRtc_Word32 meanBufferLength; // Temp variable
+ int32_t meanVal; // Mean value of frame (Q4)
+ uint32_t frameRate = 0;
+ int32_t meanBufferLength; // Temp variable
meanVal = ((stats.sum << kMeanValueScaling) / stats.numPixels);
/* Update mean value buffer.
* This should be done even though we might end up in an unreliable detection.
*/
- memmove(_meanBuffer + 1, _meanBuffer, (kMeanBufferLength - 1) * sizeof(WebRtc_Word32));
+ memmove(_meanBuffer + 1, _meanBuffer, (kMeanBufferLength - 1) * sizeof(int32_t));
_meanBuffer[0] = meanVal;
/* Update timestamp buffer.
* This should be done even though we might end up in an unreliable detection.
*/
memmove(_timestampBuffer + 1, _timestampBuffer, (kMeanBufferLength - 1) *
- sizeof(WebRtc_UWord32));
+ sizeof(uint32_t));
_timestampBuffer[0] = timestamp;
/* Compute current frame rate (Q4) */
@@ -354,12 +354,12 @@ VPMDeflickering::PreDetection(const WebRtc_UWord32 timestamp,
2: Detection not possible due to unreliable frequency interval
-1: Error
*/
-WebRtc_Word32 VPMDeflickering::DetectFlicker()
+int32_t VPMDeflickering::DetectFlicker()
{
/* Local variables */
- WebRtc_UWord32 i;
- WebRtc_Word32 freqEst; // (Q4) Frequency estimate to base detection upon
- WebRtc_Word32 retVal = -1;
+ uint32_t i;
+ int32_t freqEst; // (Q4) Frequency estimate to base detection upon
+ int32_t retVal = -1;
/* Sanity check for _meanBufferLength */
if (_meanBufferLength < 2)
@@ -370,11 +370,11 @@ WebRtc_Word32 VPMDeflickering::DetectFlicker()
/* Count zero crossings with a dead zone to be robust against noise.
* If the noise std is 2 pixel this corresponds to about 95% confidence interval.
*/
- WebRtc_Word32 deadzone = (kZeroCrossingDeadzone << kMeanValueScaling); // Q4
- WebRtc_Word32 meanOfBuffer = 0; // Mean value of mean value buffer
- WebRtc_Word32 numZeros = 0; // Number of zeros that cross the deadzone
- WebRtc_Word32 cntState = 0; // State variable for zero crossing regions
- WebRtc_Word32 cntStateOld = 0; // Previous state variable for zero crossing regions
+ int32_t deadzone = (kZeroCrossingDeadzone << kMeanValueScaling); // Q4
+ int32_t meanOfBuffer = 0; // Mean value of mean value buffer
+ int32_t numZeros = 0; // Number of zeros that cross the deadzone
+ int32_t cntState = 0; // State variable for zero crossing regions
+ int32_t cntStateOld = 0; // Previous state variable for zero crossing regions
for (i = 0; i < _meanBufferLength; i++)
{
@@ -411,14 +411,14 @@ WebRtc_Word32 VPMDeflickering::DetectFlicker()
freqEst /= (_timestampBuffer[0] - _timestampBuffer[_meanBufferLength - 1]);
/* Translate frequency estimate to regions close to 100 and 120 Hz */
- WebRtc_UWord8 freqState = 0; // Current translation state;
+ uint8_t freqState = 0; // Current translation state:
// (0) Not in interval,
// (1) Within valid interval,
// (2) Out of range
- WebRtc_Word32 freqAlias = freqEst;
+ int32_t freqAlias = freqEst;
if (freqEst > kMinFrequencyToDetect)
{
- WebRtc_UWord8 aliasState = 1;
+ uint8_t aliasState = 1;
while(freqState == 0)
{
/* Increase frequency */
diff --git a/modules/video_processing/main/source/deflickering.h b/modules/video_processing/main/source/deflickering.h
index fc796ee4..5d4ae2dd 100644
--- a/modules/video_processing/main/source/deflickering.h
+++ b/modules/video_processing/main/source/deflickering.h
@@ -28,17 +28,17 @@ public:
VPMDeflickering();
~VPMDeflickering();
- WebRtc_Word32 ChangeUniqueId(WebRtc_Word32 id);
+ int32_t ChangeUniqueId(int32_t id);
void Reset();
- WebRtc_Word32 ProcessFrame(I420VideoFrame* frame,
- VideoProcessingModule::FrameStats* stats);
+ int32_t ProcessFrame(I420VideoFrame* frame,
+ VideoProcessingModule::FrameStats* stats);
private:
- WebRtc_Word32 PreDetection(WebRtc_UWord32 timestamp,
- const VideoProcessingModule::FrameStats& stats);
+ int32_t PreDetection(uint32_t timestamp,
+ const VideoProcessingModule::FrameStats& stats);
- WebRtc_Word32 DetectFlicker();
+ int32_t DetectFlicker();
enum { kMeanBufferLength = 32 };
enum { kFrameHistorySize = 15 };
@@ -46,18 +46,18 @@ private:
enum { kNumQuants = kNumProbs + 2 };
enum { kMaxOnlyLength = 5 };
- WebRtc_Word32 _id;
+ int32_t _id;
- WebRtc_UWord32 _meanBufferLength;
- WebRtc_UWord8 _detectionState; // 0: No flickering
+ uint32_t _meanBufferLength;
+ uint8_t _detectionState; // 0: No flickering
// 1: Flickering detected
// 2: In flickering
- WebRtc_Word32 _meanBuffer[kMeanBufferLength];
- WebRtc_UWord32 _timestampBuffer[kMeanBufferLength];
- WebRtc_UWord32 _frameRate;
- static const WebRtc_UWord16 _probUW16[kNumProbs];
- static const WebRtc_UWord16 _weightUW16[kNumQuants - kMaxOnlyLength];
- WebRtc_UWord8 _quantHistUW8[kFrameHistorySize][kNumQuants];
+ int32_t _meanBuffer[kMeanBufferLength];
+ uint32_t _timestampBuffer[kMeanBufferLength];
+ uint32_t _frameRate;
+ static const uint16_t _probUW16[kNumProbs];
+ static const uint16_t _weightUW16[kNumQuants - kMaxOnlyLength];
+ uint8_t _quantHistUW8[kFrameHistorySize][kNumQuants];
};
} //namespace
diff --git a/modules/video_processing/main/source/denoising.cc b/modules/video_processing/main/source/denoising.cc
index f7cc32ad..801a9702 100644
--- a/modules/video_processing/main/source/denoising.cc
+++ b/modules/video_processing/main/source/denoising.cc
@@ -45,8 +45,8 @@ VPMDenoising::~VPMDenoising()
}
}
-WebRtc_Word32
-VPMDenoising::ChangeUniqueId(const WebRtc_Word32 id)
+int32_t
+VPMDenoising::ChangeUniqueId(const int32_t id)
{
_id = id;
return VPM_OK;
@@ -71,18 +71,18 @@ VPMDenoising::Reset()
}
}
-WebRtc_Word32
+int32_t
VPMDenoising::ProcessFrame(I420VideoFrame* frame)
{
assert(frame);
- WebRtc_Word32 thevar;
+ int32_t thevar;
int k;
int jsub, ksub;
- WebRtc_Word32 diff0;
- WebRtc_UWord32 tmpMoment1;
- WebRtc_UWord32 tmpMoment2;
- WebRtc_UWord32 tmp;
- WebRtc_Word32 numPixelsChanged = 0;
+ int32_t diff0;
+ uint32_t tmpMoment1;
+ uint32_t tmpMoment2;
+ uint32_t tmp;
+ int32_t numPixelsChanged = 0;
if (frame->IsZeroSize())
{
@@ -95,7 +95,7 @@ VPMDenoising::ProcessFrame(I420VideoFrame* frame)
int height = frame->height();
/* Size of luminance component */
- const WebRtc_UWord32 ysize = height * width;
+ const uint32_t ysize = height * width;
/* Initialization */
if (ysize != _frameSize)
@@ -110,14 +110,14 @@ VPMDenoising::ProcessFrame(I420VideoFrame* frame)
if (!_moment1)
{
- _moment1 = new WebRtc_UWord32[ysize];
- memset(_moment1, 0, sizeof(WebRtc_UWord32)*ysize);
+ _moment1 = new uint32_t[ysize];
+ memset(_moment1, 0, sizeof(uint32_t)*ysize);
}
if (!_moment2)
{
- _moment2 = new WebRtc_UWord32[ysize];
- memset(_moment2, 0, sizeof(WebRtc_UWord32)*ysize);
+ _moment2 = new uint32_t[ysize];
+ memset(_moment2, 0, sizeof(uint32_t)*ysize);
}
/* Apply de-noising on each pixel, but update variance sub-sampled */
@@ -133,22 +133,22 @@ VPMDenoising::ProcessFrame(I420VideoFrame* frame)
tmpMoment1 = _moment1[k + j];
tmpMoment1 *= kDenoiseFiltParam; // Q16
tmpMoment1 += ((kDenoiseFiltParamRec *
- ((WebRtc_UWord32)buffer[k + j])) << 8);
+ ((uint32_t)buffer[k + j])) << 8);
tmpMoment1 >>= 8; // Q8
_moment1[k + j] = tmpMoment1;
tmpMoment2 = _moment2[ksub + jsub];
if ((ksub == k) && (jsub == j) && (_denoiseFrameCnt == 0))
{
- tmp = ((WebRtc_UWord32)buffer[k + j] *
- (WebRtc_UWord32)buffer[k + j]);
+ tmp = ((uint32_t)buffer[k + j] *
+ (uint32_t)buffer[k + j]);
tmpMoment2 *= kDenoiseFiltParam; // Q16
tmpMoment2 += ((kDenoiseFiltParamRec * tmp)<<8);
tmpMoment2 >>= 8; // Q8
}
_moment2[k + j] = tmpMoment2;
/* Current event = deviation from mean value */
- diff0 = ((WebRtc_Word32)buffer[k + j] << 8) - _moment1[k + j];
+ diff0 = ((int32_t)buffer[k + j] << 8) - _moment1[k + j];
/* Recent events = variance (variations over time) */
thevar = _moment2[k + j];
thevar -= ((_moment1[k + j] * _moment1[k + j]) >> 8);
@@ -161,7 +161,7 @@ VPMDenoising::ProcessFrame(I420VideoFrame* frame)
if ((thevar < kDenoiseThreshold)
&& ((diff0 * diff0 >> 8) < kDenoiseThreshold))
{ // Replace with mean
- buffer[k + j] = (WebRtc_UWord8)(_moment1[k + j] >> 8);
+ buffer[k + j] = (uint8_t)(_moment1[k + j] >> 8);
numPixelsChanged++;
}
}
diff --git a/modules/video_processing/main/source/denoising.h b/modules/video_processing/main/source/denoising.h
index 18ed1afa..3e61dcdf 100644
--- a/modules/video_processing/main/source/denoising.h
+++ b/modules/video_processing/main/source/denoising.h
@@ -25,18 +25,18 @@ public:
VPMDenoising();
~VPMDenoising();
- WebRtc_Word32 ChangeUniqueId(WebRtc_Word32 id);
+ int32_t ChangeUniqueId(int32_t id);
void Reset();
- WebRtc_Word32 ProcessFrame(I420VideoFrame* frame);
+ int32_t ProcessFrame(I420VideoFrame* frame);
private:
- WebRtc_Word32 _id;
+ int32_t _id;
- WebRtc_UWord32* _moment1; // (Q8) First order moment (mean)
- WebRtc_UWord32* _moment2; // (Q8) Second order moment
- WebRtc_UWord32 _frameSize; // Size (# of pixels) of frame
+ uint32_t* _moment1; // (Q8) First order moment (mean)
+ uint32_t* _moment2; // (Q8) Second order moment
+ uint32_t _frameSize; // Size (# of pixels) of frame
int _denoiseFrameCnt; // Counter for subsampling in time
};
diff --git a/modules/video_processing/main/source/frame_preprocessor.cc b/modules/video_processing/main/source/frame_preprocessor.cc
index 4b342cef..59221556 100644
--- a/modules/video_processing/main/source/frame_preprocessor.cc
+++ b/modules/video_processing/main/source/frame_preprocessor.cc
@@ -34,8 +34,8 @@ VPMFramePreprocessor::~VPMFramePreprocessor()
delete _vd;
}
-WebRtc_Word32
-VPMFramePreprocessor::ChangeUniqueId(const WebRtc_Word32 id)
+int32_t
+VPMFramePreprocessor::ChangeUniqueId(const int32_t id)
{
_id = id;
return VPM_OK;
@@ -71,8 +71,8 @@ VPMFramePreprocessor::SetInputFrameResampleMode(VideoFrameResampling resamplingM
}
-WebRtc_Word32
-VPMFramePreprocessor::SetMaxFrameRate(WebRtc_UWord32 maxFrameRate)
+int32_t
+VPMFramePreprocessor::SetMaxFrameRate(uint32_t maxFrameRate)
{
if (maxFrameRate == 0)
{
@@ -85,14 +85,14 @@ VPMFramePreprocessor::SetMaxFrameRate(WebRtc_UWord32 maxFrameRate)
}
-WebRtc_Word32
-VPMFramePreprocessor::SetTargetResolution(WebRtc_UWord32 width, WebRtc_UWord32 height, WebRtc_UWord32 frameRate)
+int32_t
+VPMFramePreprocessor::SetTargetResolution(uint32_t width, uint32_t height, uint32_t frameRate)
{
if ( (width == 0) || (height == 0) || (frameRate == 0))
{
return VPM_PARAMETER_ERROR;
}
- WebRtc_Word32 retVal = 0;
+ int32_t retVal = 0;
retVal = _spatialResampler->SetTargetFrameSize(width, height);
if (retVal < 0)
{
@@ -113,28 +113,28 @@ VPMFramePreprocessor::UpdateIncomingFrameRate()
_vd->UpdateIncomingFrameRate();
}
-WebRtc_UWord32
+uint32_t
VPMFramePreprocessor::DecimatedFrameRate()
{
return _vd->DecimatedFrameRate();
}
-WebRtc_UWord32
+uint32_t
VPMFramePreprocessor::DecimatedWidth() const
{
return _spatialResampler->TargetWidth();
}
-WebRtc_UWord32
+uint32_t
VPMFramePreprocessor::DecimatedHeight() const
{
return _spatialResampler->TargetHeight();
}
-WebRtc_Word32
+int32_t
VPMFramePreprocessor::PreprocessFrame(const I420VideoFrame& frame,
I420VideoFrame** processedFrame)
{
@@ -156,8 +156,7 @@ VPMFramePreprocessor::PreprocessFrame(const I420VideoFrame& frame,
// We are not allowed to resample the input frame (must make a copy of it).
*processedFrame = NULL;
if (_spatialResampler->ApplyResample(frame.width(), frame.height())) {
- WebRtc_Word32 ret = _spatialResampler->ResampleFrame(frame,
- &_resampledFrame);
+ int32_t ret = _spatialResampler->ResampleFrame(frame, &_resampledFrame);
if (ret != VPM_OK)
return ret;
*processedFrame = &_resampledFrame;
diff --git a/modules/video_processing/main/source/frame_preprocessor.h b/modules/video_processing/main/source/frame_preprocessor.h
index 7fd8a528..03d18ee0 100644
--- a/modules/video_processing/main/source/frame_preprocessor.h
+++ b/modules/video_processing/main/source/frame_preprocessor.h
@@ -30,7 +30,7 @@ public:
VPMFramePreprocessor();
~VPMFramePreprocessor();
- WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
+ int32_t ChangeUniqueId(const int32_t id);
void Reset();
@@ -43,27 +43,25 @@ public:
void EnableContentAnalysis(bool enable);
//Set max frame rate
- WebRtc_Word32 SetMaxFrameRate(WebRtc_UWord32 maxFrameRate);
+ int32_t SetMaxFrameRate(uint32_t maxFrameRate);
//Set target resolution: frame rate and dimension
- WebRtc_Word32 SetTargetResolution(WebRtc_UWord32 width,
- WebRtc_UWord32 height,
- WebRtc_UWord32 frameRate);
+ int32_t SetTargetResolution(uint32_t width, uint32_t height,
+ uint32_t frameRate);
//Update incoming frame rate/dimension
void UpdateIncomingFrameRate();
- WebRtc_Word32 updateIncomingFrameSize(WebRtc_UWord32 width,
- WebRtc_UWord32 height);
+ int32_t updateIncomingFrameSize(uint32_t width, uint32_t height);
//Set decimated values: frame rate/dimension
- WebRtc_UWord32 DecimatedFrameRate();
- WebRtc_UWord32 DecimatedWidth() const;
- WebRtc_UWord32 DecimatedHeight() const;
+ uint32_t DecimatedFrameRate();
+ uint32_t DecimatedWidth() const;
+ uint32_t DecimatedHeight() const;
//Preprocess output:
- WebRtc_Word32 PreprocessFrame(const I420VideoFrame& frame,
- I420VideoFrame** processedFrame);
+ int32_t PreprocessFrame(const I420VideoFrame& frame,
+ I420VideoFrame** processedFrame);
VideoContentMetrics* ContentMetrics() const;
private:
@@ -71,9 +69,9 @@ private:
// we can compute new content metrics every |kSkipFrameCA| frames.
enum { kSkipFrameCA = 2 };
- WebRtc_Word32 _id;
+ int32_t _id;
VideoContentMetrics* _contentMetrics;
- WebRtc_UWord32 _maxFrameRate;
+ uint32_t _maxFrameRate;
I420VideoFrame _resampledFrame;
VPMSpatialResampler* _spatialResampler;
VPMContentAnalysis* _ca;
diff --git a/modules/video_processing/main/source/spatial_resampler.cc b/modules/video_processing/main/source/spatial_resampler.cc
index 207260e4..4dc73e9d 100644
--- a/modules/video_processing/main/source/spatial_resampler.cc
+++ b/modules/video_processing/main/source/spatial_resampler.cc
@@ -28,9 +28,9 @@ VPMSimpleSpatialResampler::~VPMSimpleSpatialResampler()
}
-WebRtc_Word32
-VPMSimpleSpatialResampler::SetTargetFrameSize(WebRtc_Word32 width,
- WebRtc_Word32 height)
+int32_t
+VPMSimpleSpatialResampler::SetTargetFrameSize(int32_t width,
+ int32_t height)
{
if (_resamplingMode == kNoRescaling) {
return VPM_OK;
@@ -61,7 +61,7 @@ VPMSimpleSpatialResampler::Reset()
_targetHeight = 0;
}
-WebRtc_Word32
+int32_t
VPMSimpleSpatialResampler::ResampleFrame(const I420VideoFrame& inFrame,
I420VideoFrame* outFrame)
{
@@ -96,21 +96,21 @@ VPMSimpleSpatialResampler::ResampleFrame(const I420VideoFrame& inFrame,
return VPM_SCALE_ERROR;
}
-WebRtc_Word32
+int32_t
VPMSimpleSpatialResampler::TargetHeight()
{
return _targetHeight;
}
-WebRtc_Word32
+int32_t
VPMSimpleSpatialResampler::TargetWidth()
{
return _targetWidth;
}
bool
-VPMSimpleSpatialResampler::ApplyResample(WebRtc_Word32 width,
- WebRtc_Word32 height)
+VPMSimpleSpatialResampler::ApplyResample(int32_t width,
+ int32_t height)
{
if ((width == _targetWidth && height == _targetHeight) ||
_resamplingMode == kNoRescaling)
diff --git a/modules/video_processing/main/source/spatial_resampler.h b/modules/video_processing/main/source/spatial_resampler.h
index 55dd817f..b8952d1d 100644
--- a/modules/video_processing/main/source/spatial_resampler.h
+++ b/modules/video_processing/main/source/spatial_resampler.h
@@ -29,16 +29,15 @@ class VPMSpatialResampler
{
public:
virtual ~VPMSpatialResampler() {};
- virtual WebRtc_Word32 SetTargetFrameSize(WebRtc_Word32 width,
- WebRtc_Word32 height) = 0;
+ virtual int32_t SetTargetFrameSize(int32_t width, int32_t height) = 0;
virtual void SetInputFrameResampleMode(VideoFrameResampling
resamplingMode) = 0;
virtual void Reset() = 0;
- virtual WebRtc_Word32 ResampleFrame(const I420VideoFrame& inFrame,
- I420VideoFrame* outFrame) = 0;
- virtual WebRtc_Word32 TargetWidth() = 0;
- virtual WebRtc_Word32 TargetHeight() = 0;
- virtual bool ApplyResample(WebRtc_Word32 width, WebRtc_Word32 height) = 0;
+ virtual int32_t ResampleFrame(const I420VideoFrame& inFrame,
+ I420VideoFrame* outFrame) = 0;
+ virtual int32_t TargetWidth() = 0;
+ virtual int32_t TargetHeight() = 0;
+ virtual bool ApplyResample(int32_t width, int32_t height) = 0;
};
class VPMSimpleSpatialResampler : public VPMSpatialResampler
@@ -46,21 +45,20 @@ class VPMSimpleSpatialResampler : public VPMSpatialResampler
public:
VPMSimpleSpatialResampler();
~VPMSimpleSpatialResampler();
- virtual WebRtc_Word32 SetTargetFrameSize(WebRtc_Word32 width,
- WebRtc_Word32 height);
+ virtual int32_t SetTargetFrameSize(int32_t width, int32_t height);
virtual void SetInputFrameResampleMode(VideoFrameResampling resamplingMode);
virtual void Reset();
- virtual WebRtc_Word32 ResampleFrame(const I420VideoFrame& inFrame,
- I420VideoFrame* outFrame);
- virtual WebRtc_Word32 TargetWidth();
- virtual WebRtc_Word32 TargetHeight();
- virtual bool ApplyResample(WebRtc_Word32 width, WebRtc_Word32 height);
+ virtual int32_t ResampleFrame(const I420VideoFrame& inFrame,
+ I420VideoFrame* outFrame);
+ virtual int32_t TargetWidth();
+ virtual int32_t TargetHeight();
+ virtual bool ApplyResample(int32_t width, int32_t height);
private:
VideoFrameResampling _resamplingMode;
- WebRtc_Word32 _targetWidth;
- WebRtc_Word32 _targetHeight;
+ int32_t _targetWidth;
+ int32_t _targetHeight;
Scaler _scaler;
};
diff --git a/modules/video_processing/main/source/video_decimator.cc b/modules/video_processing/main/source/video_decimator.cc
index 43bda088..a50189e1 100644
--- a/modules/video_processing/main/source/video_decimator.cc
+++ b/modules/video_processing/main/source/video_decimator.cc
@@ -53,8 +53,8 @@ VPMVideoDecimator::EnableTemporalDecimation(bool enable)
{
_enableTemporalDecimation = enable;
}
-WebRtc_Word32
-VPMVideoDecimator::SetMaxFrameRate(WebRtc_UWord32 maxFrameRate)
+int32_t
+VPMVideoDecimator::SetMaxFrameRate(uint32_t maxFrameRate)
{
if (maxFrameRate == 0)
{
@@ -71,8 +71,8 @@ VPMVideoDecimator::SetMaxFrameRate(WebRtc_UWord32 maxFrameRate)
return VPM_OK;
}
-WebRtc_Word32
-VPMVideoDecimator::SetTargetFrameRate(WebRtc_UWord32 frameRate)
+int32_t
+VPMVideoDecimator::SetTargetFrameRate(uint32_t frameRate)
{
if (frameRate == 0)
{
@@ -103,7 +103,7 @@ VPMVideoDecimator::DropFrame()
return false;
}
- const WebRtc_UWord32 incomingFrameRate = static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
+ const uint32_t incomingFrameRate = static_cast<uint32_t>(_incomingFrameRate + 0.5f);
if (_targetFrameRate == 0)
{
@@ -113,14 +113,14 @@ VPMVideoDecimator::DropFrame()
bool drop = false;
if (incomingFrameRate > _targetFrameRate)
{
- WebRtc_Word32 overshoot = _overShootModifier + (incomingFrameRate - _targetFrameRate);
+ int32_t overshoot = _overShootModifier + (incomingFrameRate - _targetFrameRate);
if(overshoot < 0)
{
overshoot = 0;
_overShootModifier = 0;
}
- if (overshoot && 2 * overshoot < (WebRtc_Word32) incomingFrameRate)
+ if (overshoot && 2 * overshoot < (int32_t) incomingFrameRate)
{
if (_dropCount) // Just got here so drop to be sure.
@@ -128,12 +128,12 @@ VPMVideoDecimator::DropFrame()
_dropCount = 0;
return true;
}
- const WebRtc_UWord32 dropVar = incomingFrameRate / overshoot;
+ const uint32_t dropVar = incomingFrameRate / overshoot;
if (_keepCount >= dropVar)
{
drop = true;
- _overShootModifier = -((WebRtc_Word32) incomingFrameRate % overshoot) / 3;
+ _overShootModifier = -((int32_t) incomingFrameRate % overshoot) / 3;
_keepCount = 1;
}
else
@@ -145,7 +145,7 @@ VPMVideoDecimator::DropFrame()
else
{
_keepCount = 0;
- const WebRtc_UWord32 dropVar = overshoot / _targetFrameRate;
+ const uint32_t dropVar = overshoot / _targetFrameRate;
if (_dropCount < dropVar)
{
drop = true;
@@ -164,28 +164,28 @@ VPMVideoDecimator::DropFrame()
}
-WebRtc_UWord32
+uint32_t
VPMVideoDecimator::DecimatedFrameRate()
{
ProcessIncomingFrameRate(TickTime::MillisecondTimestamp());
if (!_enableTemporalDecimation)
{
- return static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
+ return static_cast<uint32_t>(_incomingFrameRate + 0.5f);
}
- return VD_MIN(_targetFrameRate, static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f));
+ return VD_MIN(_targetFrameRate, static_cast<uint32_t>(_incomingFrameRate + 0.5f));
}
-WebRtc_UWord32
+uint32_t
VPMVideoDecimator::InputFrameRate()
{
ProcessIncomingFrameRate(TickTime::MillisecondTimestamp());
- return static_cast<WebRtc_UWord32>(_incomingFrameRate + 0.5f);
+ return static_cast<uint32_t>(_incomingFrameRate + 0.5f);
}
void
VPMVideoDecimator::UpdateIncomingFrameRate()
{
- WebRtc_Word64 now = TickTime::MillisecondTimestamp();
+ int64_t now = TickTime::MillisecondTimestamp();
if(_incomingFrameTimes[0] == 0)
{
// first no shift
@@ -202,10 +202,10 @@ VPMVideoDecimator::UpdateIncomingFrameRate()
}
void
-VPMVideoDecimator::ProcessIncomingFrameRate(WebRtc_Word64 now)
+VPMVideoDecimator::ProcessIncomingFrameRate(int64_t now)
{
- WebRtc_Word32 num = 0;
- WebRtc_Word32 nrOfFrames = 0;
+ int32_t num = 0;
+ int32_t nrOfFrames = 0;
for(num = 1; num < (kFrameCountHistorySize - 1); num++)
{
if (_incomingFrameTimes[num] <= 0 ||
@@ -219,7 +219,7 @@ VPMVideoDecimator::ProcessIncomingFrameRate(WebRtc_Word64 now)
}
if (num > 1)
{
- WebRtc_Word64 diff = now - _incomingFrameTimes[num-1];
+ int64_t diff = now - _incomingFrameTimes[num-1];
_incomingFrameRate = 1.0;
if(diff >0)
{
diff --git a/modules/video_processing/main/source/video_decimator.h b/modules/video_processing/main/source/video_decimator.h
index e152bb98..f543633d 100644
--- a/modules/video_processing/main/source/video_decimator.h
+++ b/modules/video_processing/main/source/video_decimator.h
@@ -29,33 +29,33 @@ public:
void EnableTemporalDecimation(bool enable);
- WebRtc_Word32 SetMaxFrameRate(WebRtc_UWord32 maxFrameRate);
- WebRtc_Word32 SetTargetFrameRate(WebRtc_UWord32 frameRate);
+ int32_t SetMaxFrameRate(uint32_t maxFrameRate);
+ int32_t SetTargetFrameRate(uint32_t frameRate);
bool DropFrame();
void UpdateIncomingFrameRate();
// Get Decimated Frame Rate/Dimensions
- WebRtc_UWord32 DecimatedFrameRate();
+ uint32_t DecimatedFrameRate();
//Get input frame rate
- WebRtc_UWord32 InputFrameRate();
+ uint32_t InputFrameRate();
private:
- void ProcessIncomingFrameRate(WebRtc_Word64 now);
+ void ProcessIncomingFrameRate(int64_t now);
enum { kFrameCountHistorySize = 90};
enum { kFrameHistoryWindowMs = 2000};
// Temporal decimation
- WebRtc_Word32 _overShootModifier;
- WebRtc_UWord32 _dropCount;
- WebRtc_UWord32 _keepCount;
- WebRtc_UWord32 _targetFrameRate;
+ int32_t _overShootModifier;
+ uint32_t _dropCount;
+ uint32_t _keepCount;
+ uint32_t _targetFrameRate;
float _incomingFrameRate;
- WebRtc_UWord32 _maxFrameRate;
- WebRtc_Word64 _incomingFrameTimes[kFrameCountHistorySize];
+ uint32_t _maxFrameRate;
+ int64_t _incomingFrameTimes[kFrameCountHistorySize];
bool _enableTemporalDecimation;
};
diff --git a/modules/video_processing/main/source/video_processing_impl.cc b/modules/video_processing/main/source/video_processing_impl.cc
index 35ca8181..4f1433d2 100644
--- a/modules/video_processing/main/source/video_processing_impl.cc
+++ b/modules/video_processing/main/source/video_processing_impl.cc
@@ -21,8 +21,8 @@ namespace
{
void
SetSubSampling(VideoProcessingModule::FrameStats* stats,
- const WebRtc_Word32 width,
- const WebRtc_Word32 height)
+ const int32_t width,
+ const int32_t height)
{
if (width * height >= 640 * 480)
{
@@ -48,7 +48,7 @@ namespace
}
VideoProcessingModule*
-VideoProcessingModule::Create(const WebRtc_Word32 id)
+VideoProcessingModule::Create(const int32_t id)
{
return new VideoProcessingModuleImpl(id);
@@ -63,8 +63,8 @@ VideoProcessingModule::Destroy(VideoProcessingModule* module)
}
}
-WebRtc_Word32
-VideoProcessingModuleImpl::ChangeUniqueId(const WebRtc_Word32 id)
+int32_t
+VideoProcessingModuleImpl::ChangeUniqueId(const int32_t id)
{
CriticalSectionScoped mutex(&_mutex);
_id = id;
@@ -75,14 +75,14 @@ VideoProcessingModuleImpl::ChangeUniqueId(const WebRtc_Word32 id)
return VPM_OK;
}
-WebRtc_Word32
+int32_t
VideoProcessingModuleImpl::Id() const
{
CriticalSectionScoped mutex(&_mutex);
return _id;
}
-VideoProcessingModuleImpl::VideoProcessingModuleImpl(const WebRtc_Word32 id) :
+VideoProcessingModuleImpl::VideoProcessingModuleImpl(const int32_t id) :
_id(id),
_mutex(*CriticalSectionWrapper::CreateCriticalSection())
{
@@ -114,7 +114,7 @@ VideoProcessingModuleImpl::Reset()
}
-WebRtc_Word32
+int32_t
VideoProcessingModule::GetFrameStats(FrameStats* stats,
const I420VideoFrame& frame)
{
@@ -175,19 +175,19 @@ VideoProcessingModule::ClearFrameStats(FrameStats* stats)
memset(stats->hist, 0, sizeof(stats->hist));
}
-WebRtc_Word32
+int32_t
VideoProcessingModule::ColorEnhancement(I420VideoFrame* frame)
{
return VideoProcessing::ColorEnhancement(frame);
}
-WebRtc_Word32
+int32_t
VideoProcessingModule::Brighten(I420VideoFrame* frame, int delta)
{
return VideoProcessing::Brighten(frame, delta);
}
-WebRtc_Word32
+int32_t
VideoProcessingModuleImpl::Deflickering(I420VideoFrame* frame,
FrameStats* stats)
{
@@ -195,14 +195,14 @@ VideoProcessingModuleImpl::Deflickering(I420VideoFrame* frame,
return _deflickering.ProcessFrame(frame, stats);
}
-WebRtc_Word32
+int32_t
VideoProcessingModuleImpl::Denoising(I420VideoFrame* frame)
{
CriticalSectionScoped mutex(&_mutex);
return _denoising.ProcessFrame(frame);
}
-WebRtc_Word32
+int32_t
VideoProcessingModuleImpl::BrightnessDetection(const I420VideoFrame& frame,
const FrameStats& stats)
{
@@ -227,25 +227,25 @@ VideoProcessingModuleImpl::SetInputFrameResampleMode(VideoFrameResampling
_framePreProcessor.SetInputFrameResampleMode(resamplingMode);
}
-WebRtc_Word32
-VideoProcessingModuleImpl::SetMaxFrameRate(WebRtc_UWord32 maxFrameRate)
+int32_t
+VideoProcessingModuleImpl::SetMaxFrameRate(uint32_t maxFrameRate)
{
CriticalSectionScoped cs(&_mutex);
return _framePreProcessor.SetMaxFrameRate(maxFrameRate);
}
-WebRtc_Word32
-VideoProcessingModuleImpl::SetTargetResolution(WebRtc_UWord32 width,
- WebRtc_UWord32 height,
- WebRtc_UWord32 frameRate)
+int32_t
+VideoProcessingModuleImpl::SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frameRate)
{
CriticalSectionScoped cs(&_mutex);
return _framePreProcessor.SetTargetResolution(width, height, frameRate);
}
-WebRtc_UWord32
+uint32_t
VideoProcessingModuleImpl::DecimatedFrameRate()
{
CriticalSectionScoped cs(&_mutex);
@@ -253,21 +253,21 @@ VideoProcessingModuleImpl::DecimatedFrameRate()
}
-WebRtc_UWord32
+uint32_t
VideoProcessingModuleImpl::DecimatedWidth() const
{
CriticalSectionScoped cs(&_mutex);
return _framePreProcessor.DecimatedWidth();
}
-WebRtc_UWord32
+uint32_t
VideoProcessingModuleImpl::DecimatedHeight() const
{
CriticalSectionScoped cs(&_mutex);
return _framePreProcessor.DecimatedHeight();
}
-WebRtc_Word32
+int32_t
VideoProcessingModuleImpl::PreprocessFrame(const I420VideoFrame& frame,
I420VideoFrame **processedFrame)
{
diff --git a/modules/video_processing/main/source/video_processing_impl.h b/modules/video_processing/main/source/video_processing_impl.h
index b2c12c34..e622c990 100644
--- a/modules/video_processing/main/source/video_processing_impl.h
+++ b/modules/video_processing/main/source/video_processing_impl.h
@@ -26,23 +26,22 @@ class VideoProcessingModuleImpl : public VideoProcessingModule
{
public:
- VideoProcessingModuleImpl(WebRtc_Word32 id);
+ VideoProcessingModuleImpl(int32_t id);
virtual ~VideoProcessingModuleImpl();
- WebRtc_Word32 Id() const;
+ int32_t Id() const;
- virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
+ virtual int32_t ChangeUniqueId(const int32_t id);
virtual void Reset();
- virtual WebRtc_Word32 Deflickering(I420VideoFrame* frame,
- FrameStats* stats);
+ virtual int32_t Deflickering(I420VideoFrame* frame, FrameStats* stats);
- virtual WebRtc_Word32 Denoising(I420VideoFrame* frame);
+ virtual int32_t Denoising(I420VideoFrame* frame);
- virtual WebRtc_Word32 BrightnessDetection(const I420VideoFrame& frame,
- const FrameStats& stats);
+ virtual int32_t BrightnessDetection(const I420VideoFrame& frame,
+ const FrameStats& stats);
//Frame pre-processor functions
@@ -55,29 +54,29 @@ public:
virtual void EnableContentAnalysis(bool enable);
//Set max frame rate
- virtual WebRtc_Word32 SetMaxFrameRate(WebRtc_UWord32 maxFrameRate);
+ virtual int32_t SetMaxFrameRate(uint32_t maxFrameRate);
// Set Target Resolution: frame rate and dimension
- virtual WebRtc_Word32 SetTargetResolution(WebRtc_UWord32 width,
- WebRtc_UWord32 height,
- WebRtc_UWord32 frameRate);
+ virtual int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frameRate);
// Get decimated values: frame rate/dimension
- virtual WebRtc_UWord32 DecimatedFrameRate();
- virtual WebRtc_UWord32 DecimatedWidth() const;
- virtual WebRtc_UWord32 DecimatedHeight() const;
+ virtual uint32_t DecimatedFrameRate();
+ virtual uint32_t DecimatedWidth() const;
+ virtual uint32_t DecimatedHeight() const;
// Preprocess:
// Pre-process incoming frame: Sample when needed and compute content
// metrics when enabled.
// If no resampling takes place - processedFrame is set to NULL.
- virtual WebRtc_Word32 PreprocessFrame(const I420VideoFrame& frame,
- I420VideoFrame** processedFrame);
+ virtual int32_t PreprocessFrame(const I420VideoFrame& frame,
+ I420VideoFrame** processedFrame);
virtual VideoContentMetrics* ContentMetrics() const;
private:
- WebRtc_Word32 _id;
+ int32_t _id;
CriticalSectionWrapper& _mutex;
VPMDeflickering _deflickering;
diff --git a/modules/video_processing/main/test/unit_test/brightness_detection_test.cc b/modules/video_processing/main/test/unit_test/brightness_detection_test.cc
index 28561ba0..1a5b8f39 100644
--- a/modules/video_processing/main/test/unit_test/brightness_detection_test.cc
+++ b/modules/video_processing/main/test/unit_test/brightness_detection_test.cc
@@ -16,9 +16,9 @@ using namespace webrtc;
TEST_F(VideoProcessingModuleTest, BrightnessDetection)
{
- WebRtc_UWord32 frameNum = 0;
- WebRtc_Word32 brightnessWarning = 0;
- WebRtc_UWord32 warningCount = 0;
+ uint32_t frameNum = 0;
+ int32_t brightnessWarning = 0;
+ uint32_t warningCount = 0;
scoped_array<uint8_t> video_buffer(new uint8_t[_frame_length]);
while (fread(video_buffer.get(), 1, _frame_length, _sourceFile) ==
_frame_length)
@@ -56,8 +56,8 @@ TEST_F(VideoProcessingModuleTest, BrightnessDetection)
0, kRotateNone, &_videoFrame));
frameNum++;
- WebRtc_UWord8* frame = _videoFrame.buffer(kYPlane);
- WebRtc_UWord32 yTmp = 0;
+ uint8_t* frame = _videoFrame.buffer(kYPlane);
+ uint32_t yTmp = 0;
for (int yIdx = 0; yIdx < _width * _height; yIdx++)
{
yTmp = frame[yIdx] << 1;
@@ -65,7 +65,7 @@ TEST_F(VideoProcessingModuleTest, BrightnessDetection)
{
yTmp = 255;
}
- frame[yIdx] = static_cast<WebRtc_UWord8>(yTmp);
+ frame[yIdx] = static_cast<uint8_t>(yTmp);
}
VideoProcessingModule::FrameStats stats;
@@ -96,12 +96,12 @@ TEST_F(VideoProcessingModuleTest, BrightnessDetection)
0, kRotateNone, &_videoFrame));
frameNum++;
- WebRtc_UWord8* y_plane = _videoFrame.buffer(kYPlane);
- WebRtc_Word32 yTmp = 0;
+ uint8_t* y_plane = _videoFrame.buffer(kYPlane);
+ int32_t yTmp = 0;
for (int yIdx = 0; yIdx < _width * _height; yIdx++)
{
yTmp = y_plane[yIdx] >> 1;
- y_plane[yIdx] = static_cast<WebRtc_UWord8>(yTmp);
+ y_plane[yIdx] = static_cast<uint8_t>(yTmp);
}
VideoProcessingModule::FrameStats stats;
diff --git a/modules/video_processing/main/test/unit_test/color_enhancement_test.cc b/modules/video_processing/main/test/unit_test/color_enhancement_test.cc
index bd48366e..34fd163f 100644
--- a/modules/video_processing/main/test/unit_test/color_enhancement_test.cc
+++ b/modules/video_processing/main/test/unit_test/color_enhancement_test.cc
@@ -38,7 +38,7 @@ TEST_F(VideoProcessingModuleTest, ColorEnhancement)
FILE* modFile = fopen(output_file.c_str(), "w+b");
ASSERT_TRUE(modFile != NULL) << "Could not open output file.\n";
- WebRtc_UWord32 frameNum = 0;
+ uint32_t frameNum = 0;
scoped_array<uint8_t> video_buffer(new uint8_t[_frame_length]);
while (fread(video_buffer.get(), 1, _frame_length, _sourceFile) ==
_frame_length)
@@ -114,7 +114,7 @@ TEST_F(VideoProcessingModuleTest, ColorEnhancement)
// Verify that all color pixels are enhanced, and no luminance values are
// altered.
- scoped_array<uint8_t> testFrame(new WebRtc_UWord8[_frame_length]);
+ scoped_array<uint8_t> testFrame(new uint8_t[_frame_length]);
// Use value 128 as probe value, since we know that this will be changed
// in the enhancement.
diff --git a/modules/video_processing/main/test/unit_test/deflickering_test.cc b/modules/video_processing/main/test/unit_test/deflickering_test.cc
index f47ee162..15082751 100644
--- a/modules/video_processing/main/test/unit_test/deflickering_test.cc
+++ b/modules/video_processing/main/test/unit_test/deflickering_test.cc
@@ -22,11 +22,11 @@ namespace webrtc {
TEST_F(VideoProcessingModuleTest, Deflickering)
{
enum { NumRuns = 30 };
- WebRtc_UWord32 frameNum = 0;
- const WebRtc_UWord32 frameRate = 15;
+ uint32_t frameNum = 0;
+ const uint32_t frameRate = 15;
- WebRtc_Word64 minRuntime = 0;
- WebRtc_Word64 avgRuntime = 0;
+ int64_t minRuntime = 0;
+ int64_t avgRuntime = 0;
// Close automatically opened Foreman.
fclose(_sourceFile);
@@ -44,12 +44,12 @@ TEST_F(VideoProcessingModuleTest, Deflickering)
printf("\nRun time [us / frame]:\n");
scoped_array<uint8_t> video_buffer(new uint8_t[_frame_length]);
- for (WebRtc_UWord32 runIdx = 0; runIdx < NumRuns; runIdx++)
+ for (uint32_t runIdx = 0; runIdx < NumRuns; runIdx++)
{
TickTime t0;
TickTime t1;
TickInterval accTicks;
- WebRtc_UWord32 timeStamp = 1;
+ uint32_t timeStamp = 1;
frameNum = 0;
while (fread(video_buffer.get(), 1, _frame_length, _sourceFile) ==
diff --git a/modules/video_processing/main/test/unit_test/denoising_test.cc b/modules/video_processing/main/test/unit_test/denoising_test.cc
index 5a1d5460..fab064ee 100644
--- a/modules/video_processing/main/test/unit_test/denoising_test.cc
+++ b/modules/video_processing/main/test/unit_test/denoising_test.cc
@@ -22,10 +22,10 @@ namespace webrtc {
TEST_F(VideoProcessingModuleTest, Denoising)
{
enum { NumRuns = 10 };
- WebRtc_UWord32 frameNum = 0;
+ uint32_t frameNum = 0;
- WebRtc_Word64 minRuntime = 0;
- WebRtc_Word64 avgRuntime = 0;
+ int64_t minRuntime = 0;
+ int64_t avgRuntime = 0;
const std::string denoise_filename =
webrtc::test::OutputPath() + "denoise_testfile.yuv";
@@ -40,12 +40,12 @@ TEST_F(VideoProcessingModuleTest, Denoising)
"Could not open noisy file: " << noise_filename << "\n";
printf("\nRun time [us / frame]:\n");
- for (WebRtc_UWord32 runIdx = 0; runIdx < NumRuns; runIdx++)
+ for (uint32_t runIdx = 0; runIdx < NumRuns; runIdx++)
{
TickTime t0;
TickTime t1;
TickInterval accTicks;
- WebRtc_Word32 modifiedPixels = 0;
+ int32_t modifiedPixels = 0;
frameNum = 0;
scoped_array<uint8_t> video_buffer(new uint8_t[_frame_length]);
@@ -56,7 +56,7 @@ TEST_F(VideoProcessingModuleTest, Denoising)
_width, _height,
0, kRotateNone, &_videoFrame));
frameNum++;
- WebRtc_UWord8* sourceBuffer = _videoFrame.buffer(kYPlane);
+ uint8_t* sourceBuffer = _videoFrame.buffer(kYPlane);
// Add noise to a part in video stream
// Random noise
@@ -64,10 +64,10 @@ TEST_F(VideoProcessingModuleTest, Denoising)
for (int ir = 0; ir < _height; ir++)
{
- WebRtc_UWord32 ik = ir * _width;
+ uint32_t ik = ir * _width;
for (int ic = 0; ic < _width; ic++)
{
- WebRtc_UWord8 r = rand() % 16;
+ uint8_t r = rand() % 16;
r -= 8;
if (ir < _height / 4)
r = 0;
@@ -78,7 +78,7 @@ TEST_F(VideoProcessingModuleTest, Denoising)
if (ic >= 3 * _width / 4)
r = 0;
- /*WebRtc_UWord8 pixelValue = 0;
+ /*uint8_t pixelValue = 0;
if (ir >= _height / 2)
{ // Region 3 or 4
pixelValue = 170;
diff --git a/modules/video_processing/main/test/unit_test/unit_test.cc b/modules/video_processing/main/test/unit_test/unit_test.cc
index da9cdfde..e22d1d04 100644
--- a/modules/video_processing/main/test/unit_test/unit_test.cc
+++ b/modules/video_processing/main/test/unit_test/unit_test.cc
@@ -233,8 +233,8 @@ TEST_F(VideoProcessingModuleTest, Resampler)
{
enum { NumRuns = 1 };
- WebRtc_Word64 minRuntime = 0;
- WebRtc_Word64 avgRuntime = 0;
+ int64_t minRuntime = 0;
+ int64_t avgRuntime = 0;
TickTime t0;
TickTime t1;
@@ -258,7 +258,7 @@ TEST_F(VideoProcessingModuleTest, Resampler)
_width, _height,
0, kRotateNone, &_videoFrame));
- for (WebRtc_UWord32 runIdx = 0; runIdx < NumRuns; runIdx++)
+ for (uint32_t runIdx = 0; runIdx < NumRuns; runIdx++)
{
// initiate test timer
t0 = TickTime::Now();