#    Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pylint: disable=attribute-defined-outside-init
import os
import re
import csv
import shutil
import json
import urllib
import stat
from zipfile import is_zipfile, ZipFile

try:
    import pandas as pd
except ImportError:
    pd = None

from wlauto import Workload, Parameter
from wlauto.exceptions import WorkloadError, ConfigError
from wlauto.utils.misc import check_output, get_null, get_meansd
from wlauto.utils.types import numeric


RESULT_REGEX = re.compile(r'RESULT ([^:]+): ([^=]+)\s*=\s*'  # preamble and test/metric name
                          r'(\[([^\]]+)\]|(\S+))'  # value
                          r'\s*(\S+)')  # units
TRACE_REGEX = re.compile(r'Trace saved as ([^\n]+)')
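# Illustrative examples of the output lines these regexes are intended to
# match (the values and paths below are hypothetical, not from a real run):
#   RESULT warm_times: http://www.example.com/= [1029,878,953] ms
#   Trace saved as /tmp/telemetry_trace.zip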

# Trace event that signifies rendition of a Frame
FRAME_EVENT = 'SwapBuffersLatency'

TELEMETRY_ARCHIVE_URL = 'http://storage.googleapis.com/chromium-telemetry/snapshots/telemetry.zip'


class Telemetry(Workload):

    name = 'telemetry'
    description = """
    Executes Google's Telemetry benchmarking framework

    URL: https://www.chromium.org/developers/telemetry

    From the web site:

    Telemetry is Chrome's performance testing framework. It allows you to
    perform arbitrary actions on a set of web pages and report metrics about
    it. The framework abstracts:

      - Launching a browser with arbitrary flags on any platform.
      - Opening a tab and navigating to the page under test.
      - Fetching data via the Inspector timeline and traces.
      - Using Web Page Replay to cache real-world websites so they don't
        change when used in benchmarks.

    Design Principles

      - Write one performance test that runs on all platforms - Windows, Mac,
        Linux, Chrome OS, and Android for both Chrome and ContentShell.
      - Runs on browser binaries, without a full Chromium checkout, and without
        having to build the browser yourself.
      - Use WebPageReplay to get repeatable test results.
      - Clean architecture for writing benchmarks that keeps measurements and
        use cases separate.
      - Run on non-Chrome browsers for comparative studies.

    This workload runs Telemetry via its ``run_benchmark`` script (which must
    be in PATH or specified using the ``run_benchmark_path`` parameter) and
    parses metrics from the resulting output.

    **device setup**

    The device setup required will depend on whether you're running a test
    image (in which case little or no setup should be necessary) or a
    production image.

    """

    supported_platforms = ['android', 'chromeos']

    parameters = [
        Parameter('run_benchmark_path', default=None,
                  description="""
                  This is the path to the ``run_benchmark`` script which runs a
                  Telemetry benchmark. If not specified, WA will look for Telemetry in its
                  dependencies; if not found there, Telemetry will be downloaded.
                  """),
        Parameter('test', default='page_cycler.top_10_mobile',
                  description="""
                  Specifies the telemetry test to run.
                  """),
        Parameter('run_benchmark_params', default='',
                  description="""
                  Additional parameters to be passed to ``run_benchmark``.
                  """),
        Parameter('run_timeout', kind=int, default=900,
                  description="""
                  Timeout for execution of the test.
                  """),
        Parameter('extract_fps', kind=bool, default=False,
                  description="""
                  If ``True``, FPS for the run will be computed from the trace (the
                  ``trace`` profiler must be enabled via ``run_benchmark_params``).
                  """),
        Parameter('target_config', kind=str, default=None,
                  description="""
                  Manually specify the target configuration for Telemetry. This must contain
                  the ``--browser`` option plus any additional options Telemetry requires for a
                  particular target (e.g. ``--device`` or ``--remote``).
                  """),
    ]
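
    # A minimal sketch of how this workload might be configured in a WA agenda
    # (assuming the standard agenda format; the values below are only examples):
    #
    #   workloads:
    #     - name: telemetry
    #       workload_parameters:
    #         test: page_cycler.top_10_mobile
    #         run_timeout: 900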

    def validate(self):
        self._resolve_run_benchmark_path()
        ret = os.system('{} > {} 2>&1'.format(self.run_benchmark_path, get_null()))
        if ret > 255:
            pass  # telemetry found and appears to be installed properly.
        elif ret == 127:
            raise WorkloadError('run_benchmark not found (did you specify correct run_benchmark_path?)')
        else:
            raise WorkloadError('Unexpected error from run_benchmark: {}'.format(ret))
        if self.extract_fps and 'trace' not in self.run_benchmark_params:
            raise ConfigError('"trace" profiler must be enabled in order to extract FPS for Telemetry')
        if self.extract_fps and pd is None:
            raise ConfigError('extract_fps requires pandas to be installed.')

    def setup(self, context):
        self.raw_output = None
        self.error_output = None
        self.command = self.build_command()

    def run(self, context):
        self.logger.debug(self.command)
        self.raw_output, self.error_output = check_output(self.command, shell=True, timeout=self.run_timeout, ignore='all')

    def update_result(self, context):  # pylint: disable=too-many-locals
        if self.error_output:
            self.logger.error('run_benchmark output contained errors:\n' + self.error_output)
        elif not self.raw_output:
            self.logger.warning('Did not get run_benchmark output.')
            return
        raw_outfile = os.path.join(context.output_directory, 'telemetry_raw.out')
        with open(raw_outfile, 'w') as wfh:
            wfh.write(self.raw_output)
        context.add_artifact('telemetry-raw', raw_outfile, kind='raw')

        results, artifacts = parse_telemetry_results(raw_outfile)
        csv_outfile = os.path.join(context.output_directory, 'telemetry.csv')
        with open(csv_outfile, 'wb') as wfh:
            writer = csv.writer(wfh)
            writer.writerow(['kind', 'url', 'iteration', 'value', 'units'])
            for result in results:
                writer.writerows(result.rows)

                for i, value in enumerate(result.values, 1):
                    context.add_metric(result.kind, value, units=result.units,
                                       classifiers={'url': result.url, 'time': i})

            context.add_artifact('telemetry', csv_outfile, kind='data')

        for idx, artifact in enumerate(artifacts):
            if is_zipfile(artifact):
                zf = ZipFile(artifact)
                for item in zf.infolist():
                    zf.extract(item, context.output_directory)
                    context.add_artifact('telemetry_trace_{}'.format(idx), path=item.filename, kind='data')
                zf.close()
            else:  # not a zip archive
                wa_path = os.path.join(context.output_directory,
                                       os.path.basename(artifact))
                shutil.copy(artifact, wa_path)
                context.add_artifact('telemetry_artifact_{}'.format(idx), path=wa_path, kind='data')

        if self.extract_fps:
            self.logger.debug('Extracting FPS...')
            _extract_fps(context)

    def build_command(self):
        device_opts = ''
        if self.target_config:
            device_opts = self.target_config
        else:
            if self.device.platform == 'chromeos':
                if '--remote' not in self.run_benchmark_params:
                    device_opts += '--remote={} '.format(self.device.host)
                if '--browser' not in self.run_benchmark_params:
                    device_opts += '--browser=cros-chrome '
            elif self.device.platform == 'android':
                if '--device' not in self.run_benchmark_params and self.device.adb_name:
                    device_opts += '--device={} '.format(self.device.adb_name)
                if '--browser' not in self.run_benchmark_params:
                    device_opts += '--browser=android-webview-shell '
            else:
                raise WorkloadError('Unless you\'re running Telemetry on a ChromeOS or Android device, '
                                    'you must specify the target_config option.')
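        # The assembled command has the general form shown below (the device id
        # and browser here are purely illustrative):
        #   <run_benchmark_path> <test> --device=0123456789ABCDEF --browser=android-webview-shell <run_benchmark_params>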
        return '{} {} {} {}'.format(self.run_benchmark_path,
                                    self.test,
                                    device_opts,
                                    self.run_benchmark_params)

    def _resolve_run_benchmark_path(self):
        # pylint: disable=access-member-before-definition
        if self.run_benchmark_path:
            if not os.path.exists(self.run_benchmark_path):
                raise ConfigError('run_benchmark path "{}" does not exist'.format(self.run_benchmark_path))
        else:
            self.run_benchmark_path = os.path.join(self.dependencies_directory, 'telemetry', 'run_benchmark')
            self.logger.debug('run_benchmark_path not specified; using {}'.format(self.run_benchmark_path))
            if not os.path.exists(self.run_benchmark_path):
                self.logger.debug('Telemetry not found locally; downloading...')
                local_archive = os.path.join(self.dependencies_directory, 'telemetry.zip')
                urllib.urlretrieve(TELEMETRY_ARCHIVE_URL, local_archive)
                zf = ZipFile(local_archive)
                zf.extractall(self.dependencies_directory)
                zf.close()
            if not os.path.exists(self.run_benchmark_path):
                raise WorkloadError('Could not download and extract Telemetry')
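            # Make sure the extracted run_benchmark script is executable by the
            # current user (zip extraction does not preserve the mode bits).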
            old_mode = os.stat(self.run_benchmark_path).st_mode
            os.chmod(self.run_benchmark_path, old_mode | stat.S_IXUSR)


def _extract_fps(context):
    trace_files = [a.path for a in context.iteration_artifacts
                   if a.name.startswith('telemetry_trace_')]
    for tf in trace_files:
        name = os.path.splitext(os.path.basename(tf))[0]
        fps_file = os.path.join(context.output_directory, name + '-fps.csv')
        with open(tf) as fh:
            data = json.load(fh)
            events = pd.Series([e['ts'] for e in data['traceEvents'] if
                                FRAME_EVENT == e['name']])
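            # Chrome trace event timestamps ('ts') are in microseconds, so the
            # reciprocal of each inter-frame delta (scaled by 1e6) gives the
            # instantaneous frames per second.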
            fps = (1000000 / (events - events.shift(1)))
            fps.index = events
            df = fps.dropna().reset_index()
            df.columns = ['timestamp', 'fps']
            with open(fps_file, 'w') as wfh:
                df.to_csv(wfh, index=False)
            context.add_artifact('{}_fps'.format(name), fps_file, kind='data')
            context.result.add_metric('{} FPS'.format(name), df.fps.mean(),
                                      units='fps')
            context.result.add_metric('{} FPS (std)'.format(name), df.fps.std(),
                                      units='fps', lower_is_better=True)


class TelemetryResult(object):

    @property
    def average(self):
        return get_meansd(self.values)[0]

    @property
    def std(self):
        return get_meansd(self.values)[1]

    @property
    def rows(self):
        for i, v in enumerate(self.values):
            yield [self.kind, self.url, i, v, self.units]

    def __init__(self, kind=None, url=None, values=None, units=None):
        self.kind = kind
        self.url = url
        self.values = values or []
        self.units = units

    def __str__(self):
        return 'TR({kind},{url},{values},{units})'.format(**self.__dict__)

    __repr__ = __str__


def parse_telemetry_results(filepath):
    results = []
    artifacts = []
    with open(filepath) as fh:
        for line in fh:
            match = RESULT_REGEX.search(line)
            if match:
                result = TelemetryResult()
                result.kind = match.group(1)
                result.url = match.group(2)
                if match.group(4):
                    result.values = map(numeric, match.group(4).split(','))
                else:
                    result.values = [numeric(match.group(5))]
                result.units = match.group(6)
                results.append(result)
            match = TRACE_REGEX.search(line)
            if match:
                artifacts.append(match.group(1))
    return results, artifacts


if __name__ == '__main__':
    import sys  # pylint: disable=wrong-import-order,wrong-import-position
    from pprint import pprint  # pylint: disable=wrong-import-order,wrong-import-position
    path = sys.argv[1]
    pprint(parse_telemetry_results(path))