path: root/catapult/telemetry/telemetry/web_perf/timeline_based_measurement.py
blob: 151e23dbbae8c3d9cb613a24058589970289758c (plain)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
from collections import defaultdict

from tracing.metrics import metric_runner

from telemetry.timeline import chrome_trace_category_filter
from telemetry.timeline import model as model_module
from telemetry.timeline import tracing_config
from telemetry.value import trace
from telemetry.value import common_value_helpers
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf.metrics import blob_timeline
from telemetry.web_perf.metrics import jitter_timeline
from telemetry.web_perf.metrics import webrtc_rendering_timeline
from telemetry.web_perf.metrics import gpu_timeline
from telemetry.web_perf.metrics import indexeddb_timeline
from telemetry.web_perf.metrics import layout
from telemetry.web_perf.metrics import smoothness
from telemetry.web_perf.metrics import text_selection
from telemetry.web_perf import smooth_gesture_util
from telemetry.web_perf import story_test
from telemetry.web_perf import timeline_interaction_record as tir_module

# TimelineBasedMeasurement considers all instrumentation as producing a single
# timeline. But, depending on the amount of instrumentation that is enabled,
# overhead increases. The user of the measurement must therefore choose between
# a few levels of instrumentation.
LOW_OVERHEAD_LEVEL = 'low-overhead'
DEFAULT_OVERHEAD_LEVEL = 'default-overhead'
DEBUG_OVERHEAD_LEVEL = 'debug-overhead'

ALL_OVERHEAD_LEVELS = [
  LOW_OVERHEAD_LEVEL,
  DEFAULT_OVERHEAD_LEVEL,
  DEBUG_OVERHEAD_LEVEL,
]
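
# The mapping from overhead level to Chrome trace category filter is applied
# in Options.__init__ below: LOW_OVERHEAD_LEVEL uses SetLowOverheadFilter(),
# DEFAULT_OVERHEAD_LEVEL uses SetDefaultOverheadFilter(), and
# DEBUG_OVERHEAD_LEVEL uses SetDebugOverheadFilter().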


def _GetAllLegacyTimelineBasedMetrics():
  # TODO(nednguyen): use a discovery pattern to return instances of all
  # TimelineBasedMetric classes in the web_perf/metrics/ folder.
  # This cannot be done until crbug.com/460208 is fixed.
  return (smoothness.SmoothnessMetric(),
          layout.LayoutMetric(),
          gpu_timeline.GPUTimelineMetric(),
          blob_timeline.BlobTimelineMetric(),
          jitter_timeline.JitterTimelineMetric(),
          text_selection.TextSelectionMetric(),
          indexeddb_timeline.IndexedDBTimelineMetric(),
          webrtc_rendering_timeline.WebRtcRenderingTimelineMetric())


class InvalidInteractions(Exception):
  pass


# TODO(nednguyen): Get rid of this results wrapper hack after we add interaction
# records to the telemetry value system (crbug.com/453109).
class ResultsWrapperInterface(object):
  def __init__(self):
    self._tir_label = None
    self._results = None

  def SetResults(self, results):
    self._results = results

  def SetTirLabel(self, tir_label):
    self._tir_label = tir_label

  @property
  def current_page(self):
    return self._results.current_page

  def AddValue(self, value):
    raise NotImplementedError


class _TBMResultWrapper(ResultsWrapperInterface):
  def AddValue(self, value):
    assert self._tir_label
    if value.tir_label:
      assert value.tir_label == self._tir_label
    else:
      value.tir_label = self._tir_label
    self._results.AddValue(value)


def _GetRendererThreadsToInteractionRecordsMap(model):
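  """Maps each thread in the model to the interaction records found on it.

  Interaction records are recognized by their async slice names (see
  tir_module.IsTimelineInteractionRecord) and adjusted to match synthetic
  gestures where applicable. Raises InvalidInteractions if the same interaction
  label appears on more than one thread.
  """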
  threads_to_records_map = defaultdict(list)
  interaction_labels_of_previous_threads = set()
  for curr_thread in model.GetAllThreads():
    for event in curr_thread.async_slices:
      # TODO(nduca): Add support for page-load interaction record.
      if tir_module.IsTimelineInteractionRecord(event.name):
        interaction = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
        # Adjust the interaction record to match the synthetic gesture
        # controller if needed.
        interaction = (
            smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
                model, interaction))
        threads_to_records_map[curr_thread].append(interaction)
        if interaction.label in interaction_labels_of_previous_threads:
          raise InvalidInteractions(
            'Interaction record label %s is duplicated on different '
            'threads' % interaction.label)
    if curr_thread in threads_to_records_map:
      interaction_labels_of_previous_threads.update(
        r.label for r in threads_to_records_map[curr_thread])

  return threads_to_records_map


class _TimelineBasedMetrics(object):
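  """Computes legacy (TBMv1) metrics over one renderer thread's records."""
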
  def __init__(self, model, renderer_thread, interaction_records,
               results_wrapper, metrics):
    self._model = model
    self._renderer_thread = renderer_thread
    self._interaction_records = interaction_records
    self._results_wrapper = results_wrapper
    self._all_metrics = metrics

  def AddResults(self, results):
    interactions_by_label = defaultdict(list)
    for i in self._interaction_records:
      interactions_by_label[i.label].append(i)

    for label, interactions in interactions_by_label.iteritems():
      are_repeatable = [i.repeatable for i in interactions]
      if not all(are_repeatable) and len(interactions) > 1:
        raise InvalidInteractions('Duplicate unrepeatable interaction records '
                                  'on the page')
      self._results_wrapper.SetResults(results)
      self._results_wrapper.SetTirLabel(label)
      self.UpdateResultsByMetric(interactions, self._results_wrapper)

  def UpdateResultsByMetric(self, interactions, wrapped_results):
    if not interactions:
      return

    for metric in self._all_metrics:
      metric.AddResults(self._model, self._renderer_thread,
                        interactions, wrapped_results)


class Options(object):
  """A class to be used to configure TimelineBasedMeasurement.

  This is created and returned by
  Benchmark.CreateTimelineBasedMeasurementOptions.

  By default, all the timeline based metrics in telemetry/web_perf/metrics are
  used (see _GetAllLegacyTimelineBasedMetrics above).
  To customize which metrics are run, use SetTimelineBasedMetrics().
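
  Example (illustrative; the category and metric names below are placeholders):

    options = timeline_based_measurement.Options(LOW_OVERHEAD_LEVEL)
    options.ExtendTraceCategoryFilter(['toplevel'])
    options.SetTimelineBasedMetrics(['sample_metric.html'])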
  """

  def __init__(self, overhead_level=LOW_OVERHEAD_LEVEL):
    """As the amount of instrumentation increases, so does the overhead.
    The user of the measurement chooses the overhead level that is appropriate,
    and the tracing is filtered accordingly.

    overhead_level: Can either be a custom ChromeTraceCategoryFilter object or
        one of LOW_OVERHEAD_LEVEL, DEFAULT_OVERHEAD_LEVEL or
        DEBUG_OVERHEAD_LEVEL.
    """
    self._config = tracing_config.TracingConfig()
    self._config.enable_chrome_trace = True
    self._config.enable_platform_display_trace = False

    if isinstance(overhead_level,
                  chrome_trace_category_filter.ChromeTraceCategoryFilter):
      self._config.chrome_trace_config.SetCategoryFilter(overhead_level)
    elif overhead_level in ALL_OVERHEAD_LEVELS:
      if overhead_level == LOW_OVERHEAD_LEVEL:
        self._config.chrome_trace_config.SetLowOverheadFilter()
      elif overhead_level == DEFAULT_OVERHEAD_LEVEL:
        self._config.chrome_trace_config.SetDefaultOverheadFilter()
      else:
        self._config.chrome_trace_config.SetDebugOverheadFilter()
    else:
      raise Exception("Overhead level must be a ChromeTraceCategoryFilter "
                      "object or valid overhead level string. Given overhead "
                      "level: %s" % overhead_level)

    self._timeline_based_metrics = None
    self._legacy_timeline_based_metrics = []


  def ExtendTraceCategoryFilter(self, filters):
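    """Adds each category in |filters| to the Chrome trace category filter."""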
    category_filter = self._config.chrome_trace_config.category_filter
    for new_category_filter in filters:
      category_filter.AddIncludedCategory(new_category_filter)

  @property
  def category_filter(self):
    return self._config.chrome_trace_config.category_filter

  @property
  def config(self):
    return self._config

  def AddTimelineBasedMetric(self, metric):
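    """Appends one TBMv2 metric, a path under //tracing/tracing/metrics."""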
    assert isinstance(metric, basestring)
    if self._timeline_based_metrics is None:
      self._timeline_based_metrics = []
    self._timeline_based_metrics.append(metric)

  def SetTimelineBasedMetrics(self, metrics):
    """Sets the new-style (TBMv2) metrics to run.

    Metrics are assumed to live in //tracing/tracing/metrics, so the path you
    pass in should be relative to that. For example, to specify
    sample_metric.html, you should pass in ['sample_metric.html'].

    Args:
      metrics: A list of strings giving metric paths under
          //tracing/tracing/metrics.
    """
    assert isinstance(metrics, list)
    for metric in metrics:
      assert isinstance(metric, basestring)
    self._timeline_based_metrics = metrics

  def GetTimelineBasedMetrics(self):
    return self._timeline_based_metrics

  def SetLegacyTimelineBasedMetrics(self, metrics):
    assert isinstance(metrics, collections.Iterable)
    for m in metrics:
      assert isinstance(m, timeline_based_metric.TimelineBasedMetric)
    self._legacy_timeline_based_metrics = metrics

  def GetLegacyTimelineBasedMetrics(self):
    return self._legacy_timeline_based_metrics


class TimelineBasedMeasurement(story_test.StoryTest):
  """Collects multiple metrics based on their interaction records.

  A timeline based measurement shifts the burden of choosing what metrics to
  collect onto the story under test. Instead of the measurement having a fixed
  set of values it collects, the story being tested issues (via javascript) an
  Interaction record into the user timing API describing what is happening at
  that time, as well as a standardized set of flags describing the semantics of
  the work being done. The TimelineBasedMeasurement object collects a trace
  that includes both these interaction records and a user-chosen amount of
  performance data, using Telemetry's various timeline-producing APIs, tracing
  especially.

  It then passes the recorded timeline to different TimelineBasedMetrics based
  on those flags. As an example, this allows a single story run to produce
  load timing data, smoothness data, critical jank information and overall cpu
  usage information.

  For information on how to mark up a page to work with
  TimelineBasedMeasurement, refer to the
  perf.metrics.timeline_interaction_record module.
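
  An illustrative sketch of such markup, issued from the page's javascript
  (the exact marker format is defined by the timeline_interaction_record
  module; 'MyLabel' is a hypothetical label):

      console.time('Interaction.MyLabel');
      // ... user-visible work being measured ...
      console.timeEnd('Interaction.MyLabel');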

  Args:
      options: an instance of timeline_based_measurement.Options.
      results_wrapper: A class that follows the ResultsWrapperInterface and
        whose __init__ method takes in the page_test_results object and the
        interaction record label. Note: this class is not supported long term
        and will be removed when crbug.com/453109 is resolved.
  """
  def __init__(self, options, results_wrapper=None):
    self._tbm_options = options
    self._results_wrapper = results_wrapper or _TBMResultWrapper()

  def WillRunStory(self, platform):
    """Configure and start tracing."""
    if not platform.tracing_controller.IsChromeTracingSupported():
      raise Exception('Not supported')
    if self._tbm_options.config.enable_chrome_trace:
      # Always enable 'blink.console' category for:
      # 1) Backward compat of chrome clock sync (crbug.com/646925)
      # 2) Allows users to add trace events through javascript.
      # Note that blink.console is extremely low-overhead, so this doesn't
      # affect the tracing overhead budget much.
      chrome_config = self._tbm_options.config.chrome_trace_config
      chrome_config.category_filter.AddIncludedCategory('blink.console')
    platform.tracing_controller.StartTracing(self._tbm_options.config)

  def Measure(self, platform, results):
    """Collect all possible metrics and added them to results."""
    platform.tracing_controller.telemetry_info = results.telemetry_info
    trace_result = platform.tracing_controller.StopTracing()
    trace_value = trace.TraceValue(results.current_page, trace_result)
    results.AddValue(trace_value)

    if self._tbm_options.GetTimelineBasedMetrics():
      self._ComputeTimelineBasedMetrics(results, trace_value)
      # Legacy metrics can be computed, but only if explicitly specified.
      if self._tbm_options.GetLegacyTimelineBasedMetrics():
        self._ComputeLegacyTimelineBasedMetrics(results, trace_result)
    else:
      # Run all TBMv1 metrics if no other metric is specified (legacy behavior)
      if not self._tbm_options.GetLegacyTimelineBasedMetrics():
        logging.warn('Please specify the TBMv1 metrics you are interested in '
                     'explicitly. This implicit functionality will be removed '
                     'on July 17, 2016.')
        self._tbm_options.SetLegacyTimelineBasedMetrics(
            _GetAllLegacyTimelineBasedMetrics())
      self._ComputeLegacyTimelineBasedMetrics(results, trace_result)

  def DidRunStory(self, platform):
    """Clean up after running the story."""
    if platform.tracing_controller.is_tracing_running:
      platform.tracing_controller.StopTracing()

  def _ComputeTimelineBasedMetrics(self, results, trace_value):
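    """Runs the configured TBMv2 metrics over the trace and adds the results.

    Metric failures and scalar values are translated into telemetry values;
    histogram dicts are appended to results.value_set unchanged.
    """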
    metrics = self._tbm_options.GetTimelineBasedMetrics()
    extra_import_options = {
      'trackDetailedModelStats': True
    }

    mre_result = metric_runner.RunMetric(
        trace_value.filename, metrics, extra_import_options)
    page = results.current_page

    failure_dicts = mre_result.failures
    for d in failure_dicts:
      results.AddValue(
          common_value_helpers.TranslateMreFailure(d, page))

    results.value_set.extend(mre_result.pairs.get('histograms', []))

    for d in mre_result.pairs.get('scalars', []):
      results.AddValue(common_value_helpers.TranslateScalarValue(d, page))

  def _ComputeLegacyTimelineBasedMetrics(self, results, trace_result):
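    """Builds a TimelineModel from the trace and runs the legacy metrics."""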
    model = model_module.TimelineModel(trace_result)
    threads_to_records_map = _GetRendererThreadsToInteractionRecordsMap(model)
    if (len(threads_to_records_map.values()) == 0 and
        self._tbm_options.config.enable_chrome_trace):
      logging.warning(
          'No timeline interaction records were recorded in the trace. '
          'This could be caused by console.time() & console.timeEnd() '
          'execution failure, or by the specified tracing categories not '
          'including the blink.console category.')

    all_metrics = self._tbm_options.GetLegacyTimelineBasedMetrics()

    for renderer_thread, interaction_records in (
        threads_to_records_map.iteritems()):
      meta_metrics = _TimelineBasedMetrics(
          model, renderer_thread, interaction_records, self._results_wrapper,
          all_metrics)
      meta_metrics.AddResults(results)

    for metric in all_metrics:
      metric.AddWholeTraceResults(model, results)