path: root/webrtc/modules/video_processing/main/source/content_analysis.cc
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "webrtc/modules/video_processing/main/source/content_analysis.h"

#include <math.h>
#include <stdlib.h>

#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"

namespace webrtc {

VPMContentAnalysis::VPMContentAnalysis(bool runtime_cpu_detection)
    : orig_frame_(NULL),
      prev_frame_(NULL),
      width_(0),
      height_(0),
      skip_num_(1),
      border_(8),
      motion_magnitude_(0.0f),
      spatial_pred_err_(0.0f),
      spatial_pred_err_h_(0.0f),
      spatial_pred_err_v_(0.0f),
      first_frame_(true),
      ca_Init_(false),
      content_metrics_(NULL) {
  ComputeSpatialMetrics = &VPMContentAnalysis::ComputeSpatialMetrics_C;
  TemporalDiffMetric = &VPMContentAnalysis::TemporalDiffMetric_C;

  if (runtime_cpu_detection) {
#if defined(WEBRTC_ARCH_X86_FAMILY)
    if (WebRtc_GetCPUInfo(kSSE2)) {
      ComputeSpatialMetrics = &VPMContentAnalysis::ComputeSpatialMetrics_SSE2;
      TemporalDiffMetric = &VPMContentAnalysis::TemporalDiffMetric_SSE2;
    }
#endif
  }
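  // Release() doubles as the common state reset; all pointers are still
  // NULL here, so it only zeroes the dimensions and the first-frame flag.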
  Release();
}

VPMContentAnalysis::~VPMContentAnalysis() {
  Release();
}

VideoContentMetrics* VPMContentAnalysis::ComputeContentMetrics(
    const VideoFrame& inputFrame) {
  if (inputFrame.IsZeroSize())
    return NULL;

  // (Re)initialize on a native dimension change.
  if (width_ != inputFrame.width() || height_ != inputFrame.height()) {
    if (VPM_OK != Initialize(inputFrame.width(), inputFrame.height()))
      return NULL;
  }
  // Only interested in the Y plane.
  orig_frame_ = inputFrame.buffer(kYPlane);

  // Compute spatial metrics: 3 spatial prediction errors.
  (this->*ComputeSpatialMetrics)();

  // Compute motion metrics; skipped for the first frame, which has no
  // previous frame to difference against.
  if (first_frame_ == false)
    ComputeMotionMetrics();

  // Save the current frame as the previous one: Y plane only.
  memcpy(prev_frame_, orig_frame_, width_ * height_);

  first_frame_ = false;
  ca_Init_ = true;

  return ContentMetrics();
}

int32_t VPMContentAnalysis::Release() {
  if (content_metrics_ != NULL) {
    delete content_metrics_;
    content_metrics_ = NULL;
  }

  if (prev_frame_ != NULL) {
    delete [] prev_frame_;
    prev_frame_ = NULL;
  }

  width_ = 0;
  height_ = 0;
  first_frame_ = true;

  return VPM_OK;
}

int32_t VPMContentAnalysis::Initialize(int width, int height) {
  width_ = width;
  height_ = height;
  first_frame_ = true;

  // Skip parameter: number of skipped rows, for complexity reduction.
  // The temporal metric currently also uses it for column reduction.
  skip_num_ = 1;

  // Use skip_num_ = 2 for 4CIF/WHD and larger.
  if (height_ >= 576 && width_ >= 704) {
    skip_num_ = 2;
  }
  // Use skip_num_ = 4 for full-HD (1080p) and larger.
  if (height_ >= 1080 && width_ >= 1920) {
    skip_num_ = 4;
  }

  if (content_metrics_ != NULL) {
    delete content_metrics_;
  }

  if (prev_frame_ != NULL) {
    delete [] prev_frame_;
  }

  // The spatial metrics are not computed on the 8-pixel border, and the
  // minimum processing block size is 16 pixels, so the width and height
  // must exceed 32.
  if (width_ <= 32 || height_ <= 32) {
    ca_Init_ = false;
    return VPM_PARAMETER_ERROR;
  }

  content_metrics_ = new VideoContentMetrics();
  if (content_metrics_ == NULL) {
    return VPM_MEMORY;
  }

  prev_frame_ = new uint8_t[width_ * height_];  // Y only.
  if (prev_frame_ == NULL) return VPM_MEMORY;

  return VPM_OK;
}


// Compute motion metrics: magnitude over non-zero motion vectors,
//  and size of zero cluster
int32_t VPMContentAnalysis::ComputeMotionMetrics() {
  // Motion metrics: only one is derived from normalized
  //  (MAD) temporal difference
  (this->*TemporalDiffMetric)();
  return VPM_OK;
}

// Normalized temporal difference (MAD): used as a motion level metric
// Normalize MAD by spatial contrast: images with more contrast
//  (pixel variance) likely have larger temporal difference
// To reduce complexity, we compute the metric for a reduced set of points.
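// Concretely, over the sampled pixels:
//   motion_magnitude_ = mean(|curr - prev|) / sqrt(E[p^2] - (E[p])^2),
// i.e. the mean absolute temporal difference divided by the standard
// deviation of the current frame's pixel values.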
int32_t VPMContentAnalysis::TemporalDiffMetric_C() {
  // Size of the original frame.
  const int sizei = height_;
  const int sizej = width_;
  uint32_t tempDiffSum = 0;
  uint32_t pixelSum = 0;
  uint64_t pixelSqSum = 0;

  uint32_t num_pixels = 0;  // Counter for # of pixels.
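  // Make sure the work section is a multiple of 16.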
  const int width_end = ((width_ - 2 * border_) & -16) + border_;

  for (int i = border_; i < sizei - border_; i += skip_num_) {
    for (int j = border_; j < width_end; j++) {
      num_pixels += 1;
      int ssn =  i * sizej + j;

      uint8_t currPixel  = orig_frame_[ssn];
      uint8_t prevPixel  = prev_frame_[ssn];

      // The uint8_t operands promote to int, so the difference is signed;
      // the int16_t cast narrows it before abs().
      tempDiffSum += (uint32_t)abs((int16_t)(currPixel - prevPixel));
      pixelSum += (uint32_t) currPixel;
      pixelSqSum += (uint64_t) (currPixel * currPixel);
    }
  }

  // Default.
  motion_magnitude_ = 0.0f;

  if (tempDiffSum == 0) return VPM_OK;

  // Normalize over all sampled pixels.
  const float tempDiffAvg = (float)tempDiffSum / (float)num_pixels;
  const float pixelSumAvg = (float)pixelSum / (float)num_pixels;
  const float pixelSqSumAvg = (float)pixelSqSum / (float)num_pixels;
  float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);

  if (contrast > 0.0) {
    contrast = sqrt(contrast);
    motion_magnitude_ = tempDiffAvg / contrast;
  }
  return VPM_OK;
}

// Compute spatial metrics:
// To reduce complexity, we compute the metric for a reduced set of points.
// The spatial metrics are rough estimates of the prediction error cost for
//  each QM spatial mode: 2x2,1x2,2x1
// The metrics are a simple estimate of the up-sampling prediction error,
// estimated assuming sub-sampling for decimation (no filtering),
// and up-sampling back up with simple bilinear interpolation.
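// For each sampled interior pixel p with top/bottom/left/right neighbors:
//   2x2 error: |4*p - (top + bottom + left + right)|
//   1x2 error: |2*p - (left + right)|
//   2x1 error: |2*p - (top + bottom)|
// Each sum is scaled back down and normalized by the sum of pixel values.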
int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
  const int sizei = height_;
  const int sizej = width_;

  // Sum of pixel values, used to normalize the spatial metrics.
  uint32_t pixelMSA = 0;

  uint32_t spatialErrSum = 0;
  uint32_t spatialErrVSum = 0;
  uint32_t spatialErrHSum = 0;

  // Make sure the work section is a multiple of 16.
  const int width_end = ((sizej - 2 * border_) & -16) + border_;

  for (int i = border_; i < sizei - border_; i += skip_num_) {
    for (int j = border_; j < width_end; j++) {
      int ssn1 = i * sizej + j;        // Center.
      int ssn2 = (i + 1) * sizej + j;  // Bottom.
      int ssn3 = (i - 1) * sizej + j;  // Top.
      int ssn4 = i * sizej + j + 1;    // Right.
      int ssn5 = i * sizej + j - 1;    // Left.

      // 2x and 4x the center pixel: references for the two-tap and
      // four-tap prediction errors below.
      uint16_t refPixel1 = orig_frame_[ssn1] << 1;
      uint16_t refPixel2 = orig_frame_[ssn1] << 2;

      uint8_t bottPixel = orig_frame_[ssn2];
      uint8_t topPixel = orig_frame_[ssn3];
      uint8_t rightPixel = orig_frame_[ssn4];
      uint8_t leftPixel = orig_frame_[ssn5];

      spatialErrSum  += (uint32_t) abs((int16_t)(refPixel2
          - (uint16_t)(bottPixel + topPixel + leftPixel + rightPixel)));
      spatialErrVSum += (uint32_t) abs((int16_t)(refPixel1
          - (uint16_t)(bottPixel + topPixel)));
      spatialErrHSum += (uint32_t) abs((int16_t)(refPixel1
          - (uint16_t)(leftPixel + rightPixel)));
      pixelMSA += orig_frame_[ssn1];
    }
  }

  // Normalize over all pixels.
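  // The 2x2 error sum is at 4x pixel scale and the one-dimensional sums are
  // at 2x, hence the shifts.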
  const float spatialErr = (float)(spatialErrSum >> 2);
  const float spatialErrH = (float)(spatialErrHSum >> 1);
  const float spatialErrV = (float)(spatialErrVSum >> 1);
  const float norm = (float)pixelMSA;

  // 2X2:
  spatial_pred_err_ = spatialErr / norm;
  // 1X2:
  spatial_pred_err_h_ = spatialErrH / norm;
  // 2X1:
  spatial_pred_err_v_ = spatialErrV / norm;
  return VPM_OK;
}

VideoContentMetrics* VPMContentAnalysis::ContentMetrics() {
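  // Metrics are unavailable until ComputeContentMetrics() has completed at
  // least once.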
  if (ca_Init_ == false) return NULL;

  content_metrics_->spatial_pred_err = spatial_pred_err_;
  content_metrics_->spatial_pred_err_h = spatial_pred_err_h_;
  content_metrics_->spatial_pred_err_v = spatial_pred_err_v_;
  // Motion metric: normalized temporal difference (MAD).
  content_metrics_->motion_magnitude = motion_magnitude_;

  return content_metrics_;
}

}  // namespace webrtc