author    Sergei Trofimov <sergei.trofimov@arm.com>  2015-03-10 13:09:31 +0000
committer Sergei Trofimov <sergei.trofimov@arm.com>  2015-03-10 13:09:31 +0000
commit    a747ec7e4c2ea8a25bfc675f80042eb6600c7050 (patch)
tree      077c0439a89a5c33b9fa1dbf9e81146ca9960d3c /wlauto/instrumentation
Initial commit of open source Workload Automation.
Diffstat (limited to 'wlauto/instrumentation')
-rw-r--r--  wlauto/instrumentation/__init__.py                      |  27
-rw-r--r--  wlauto/instrumentation/coreutil/__init__.py             | 278
-rw-r--r--  wlauto/instrumentation/daq/__init__.py                  | 221
-rw-r--r--  wlauto/instrumentation/delay/__init__.py                | 181
-rw-r--r--  wlauto/instrumentation/dmesg/__init__.py                |  62
-rw-r--r--  wlauto/instrumentation/energy_probe/__init__.py         | 145
-rw-r--r--  wlauto/instrumentation/fps/__init__.py                  | 298
-rw-r--r--  wlauto/instrumentation/hwmon/__init__.py                | 120
-rw-r--r--  wlauto/instrumentation/juno_energy/__init__.py          |  77
-rwxr-xr-x  wlauto/instrumentation/juno_energy/readenergy           | bin 0 -> 695696 bytes
-rw-r--r--  wlauto/instrumentation/misc/__init__.py                 | 365
-rw-r--r--  wlauto/instrumentation/perf/LICENSE                     |   9
-rw-r--r--  wlauto/instrumentation/perf/__init__.py                 | 176
-rwxr-xr-x  wlauto/instrumentation/perf/bin/arm64/perf              | bin 0 -> 6149310 bytes
-rwxr-xr-x  wlauto/instrumentation/perf/bin/armeabi/perf            | bin 0 -> 4964116 bytes
-rw-r--r--  wlauto/instrumentation/pmu_logger/__init__.py           | 148
-rw-r--r--  wlauto/instrumentation/streamline/__init__.py           | 298
-rw-r--r--  wlauto/instrumentation/trace_cmd/LICENSE                |  39
-rw-r--r--  wlauto/instrumentation/trace_cmd/__init__.py            | 322
-rwxr-xr-x  wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd    | bin 0 -> 1475074 bytes
-rwxr-xr-x  wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd  | bin 0 -> 1170276 bytes
21 files changed, 2766 insertions, 0 deletions
diff --git a/wlauto/instrumentation/__init__.py b/wlauto/instrumentation/__init__.py
new file mode 100644
index 00000000..094b8fa6
--- /dev/null
+++ b/wlauto/instrumentation/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wlauto.core import instrumentation
+
+
+def instrument_is_installed(instrument):
+ """Returns ``True`` if the specified instrument is installed, and ``False``
+ other wise. The insturment maybe specified either as a name or a subclass (or
+ instance of subclass) of :class:`wlauto.core.Instrument`."""
+ return instrumentation.is_installed(instrument)
+
+
+def clear_instrumentation():
+ instrumentation.installed = []
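The two helpers above are thin convenience wrappers around ``wlauto.core.instrumentation``.
A minimal usage sketch, assuming only the two wrappers defined in this file::

    from wlauto.instrumentation import instrument_is_installed, clear_instrumentation

    # The instrument may be specified by name, or as a subclass (or an
    # instance of a subclass) of wlauto.core.Instrument.
    if instrument_is_installed('fps'):
        print 'fps instrument is installed for this run'

    # Reset the list of installed instruments (e.g. between test cases).
    clear_instrumentation()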
diff --git a/wlauto/instrumentation/coreutil/__init__.py b/wlauto/instrumentation/coreutil/__init__.py
new file mode 100644
index 00000000..e63f8c3e
--- /dev/null
+++ b/wlauto/instrumentation/coreutil/__init__.py
@@ -0,0 +1,278 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+import sys
+import re
+import time
+import shutil
+import logging
+import threading
+import subprocess
+import tempfile
+import csv
+
+from wlauto import Instrument, Parameter
+from wlauto.core.execution import ExecutionContext
+from wlauto.exceptions import InstrumentError, WorkerThreadError
+from wlauto.core import signal
+
+
+class CoreUtilization(Instrument):
+
+ name = 'coreutil'
+ description = """
+ Measures CPU core activity during workload execution in terms of the percentage of time a number
+ of cores were utilized above the specfied threshold.
+
+ This workload generates ``coreutil.csv`` report in the workload's output directory. The report is
+ formatted as follows::
+
+ <threshold,1core,2core,3core,4core
+ 18.098132,38.650248000000005,10.736180000000001,3.6809760000000002,28.834312000000001
+
+ Interpretation of the result:
+
+ - 38.65% of total time only single core is running above or equal to threshold value
+ - 10.736% of total time two cores are running simultaneously above or equal to threshold value
+ - 3.6809% of total time three cores are running simultaneously above or equal to threshold value
+ - 28.8314% of total time four cores are running simultaneously above or equal to threshold value
+ - 18.098% of time all core are running below threshold value.
+
+ ..note : This instrument doesn't work on ARM big.LITTLE IKS implementation
+
+ """
+
+ parameters = [
+ Parameter('threshold', kind=int, default=50,
+ constraint=lambda x: 0 < x <= 100,
+ description='Cores with percentage utilization above this value will be considered '
+ 'as "utilized". This value may need to be adjusted based on the background '
+ 'activity and the intensity of the workload being instrumented (e.g. it may '
+ 'need to be lowered for low-intensity workloads such as video playback).'
+ )
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(CoreUtilization, self).__init__(device, **kwargs)
+ self.collector = None
+ self.output_dir = None
+ self.cores = None
+ self.output_artifact_registered = False
+
+ def setup(self, context):
+        ''' Sets up the ProcCollect data collection thread. '''
+ self.output_dir = context.output_directory
+ self.collector = ProcCollect(self.device, self.logger, self.output_dir)
+ self.cores = self.device.number_of_cores
+
+ def start(self, context): # pylint: disable=W0613
+ ''' Starts collecting data once the workload starts '''
+ self.logger.debug('Starting to collect /proc/stat data')
+ self.collector.start()
+
+ def stop(self, context): # pylint: disable=W0613
+ ''' Stops collecting data once the workload stops '''
+ self.logger.debug('Stopping /proc/stat data collection')
+ self.collector.stop()
+
+ def update_result(self, context):
+        ''' Updates the result with metrics computed from coreutil.csv. '''
+ self.collector.join() # wait for "proc.txt" to generate.
+ context.add_artifact('proctxt', 'proc.txt', 'raw')
+ calc = Calculator(self.cores, self.threshold, context) # pylint: disable=E1101
+ calc.calculate()
+ if not self.output_artifact_registered:
+ context.add_run_artifact('cpuutil', 'coreutil.csv', 'data')
+ self.output_artifact_registered = True
+
+
+class ProcCollect(threading.Thread):
+    ''' Dumps /proc/stat samples into proc.txt. '''
+
+ def __init__(self, device, logger, out_dir):
+ super(ProcCollect, self).__init__()
+ self.device = device
+ self.logger = logger
+ self.dire = out_dir
+ self.stop_signal = threading.Event()
+ self.command = 'cat /proc/stat'
+ self.exc = None
+
+ def run(self):
+ try:
+ self.stop_signal.clear()
+ _, temp_file = tempfile.mkstemp()
+ self.logger.debug('temp file : {}'.format(temp_file))
+ with open(temp_file, 'wb') as tempfp:
+ while not self.stop_signal.is_set():
+ tempfp.write(self.device.execute(self.command))
+ tempfp.write('\n')
+ time.sleep(0.5)
+ raw_file = os.path.join(self.dire, 'proc.txt')
+ shutil.copy(temp_file, raw_file)
+ os.unlink(temp_file)
+ except Exception, error: # pylint: disable=W0703
+ self.logger.warning('Exception on collector thread : {}({})'.format(error.__class__.__name__, error))
+ self.exc = WorkerThreadError(self.name, sys.exc_info())
+
+ def stop(self):
+ '''Executed once the workload stops'''
+ self.stop_signal.set()
+ if self.exc is not None:
+ raise self.exc # pylint: disable=E0702
+
+
+class Calculator(object):
+ """
+    Parses ``proc.txt`` (the periodic dumps of ``/proc/stat`` produced by ProcCollect)
+    to generate ``coreutil.csv``. Sample record from ``proc.txt``::
+
+ ----------------------------------------------------------------------
+ cpu 9853753 51448 3248855 12403398 4241 111 14996 0 0 0
+ cpu0 1585220 7756 1103883 4977224 552 97 10505 0 0 0
+ cpu1 2141168 7243 564347 972273 504 4 1442 0 0 0
+ cpu2 1940681 7994 651946 1005534 657 3 1424 0 0 0
+ cpu3 1918013 8833 667782 1012249 643 3 1326 0 0 0
+ cpu4 165429 5363 50289 1118910 474 0 148 0 0 0
+ cpu5 1661299 4910 126654 1104018 480 0 53 0 0 0
+ cpu6 333642 4657 48296 1102531 482 2 55 0 0 0
+ cpu7 108299 4691 35656 1110658 448 0 41 0 0 0
+ ----------------------------------------------------------------------
+    Description:
+
+    1st column  : cpu_id (cpu0, cpu1, cpu2, ...)
+    All subsequent columns represent amounts of time, measured in units of USER_HZ:
+    2nd column  : Time spent in user mode
+    3rd column  : Time spent in user mode with low priority
+    4th column  : Time spent in system mode
+    5th column  : Time spent in the idle task
+    6th column  : Time waiting for I/O to complete
+    7th column  : Time servicing interrupts
+    8th column  : Time servicing softirqs
+    9th column  : Stolen time (time spent in other operating systems)
+    10th column : Time spent running a virtual CPU
+    11th column : Time spent running a niced guest
+
+    ----------------------------------------------------------------------------
+
+    Procedure to calculate instantaneous CPU utilization:
+
+    1) Subtract two consecutive samples for every column (except the 1st)
+    2) Sum all the values except "Time spent in the idle task"
+    3) CPU utilization (%) = (value obtained in step 2 / sum of all the values) * 100
+
+ """
+
+ idle_time_index = 3
+
+ def __init__(self, cores, threshold, context):
+ self.cores = cores
+ self.threshold = threshold
+ self.context = context
+ self.cpu_util = None # Store CPU utilization for each core
+ self.active = None # Store active time(total time - idle)
+ self.total = None # Store the total amount of time (in USER_HZ)
+ self.output = None
+ self.cpuid_regex = re.compile(r'cpu(\d+)')
+ self.outfile = os.path.join(context.run_output_directory, 'coreutil.csv')
+ self.infile = os.path.join(context.output_directory, 'proc.txt')
+
+ def calculate(self):
+ self.calculate_total_active()
+ self.calculate_core_utilization()
+ self.generate_csv(self.context)
+
+ def calculate_total_active(self):
+ """ Read proc.txt file and calculate 'self.active' and 'self.total' """
+ all_cores = set(xrange(self.cores))
+ self.total = [[] for _ in all_cores]
+ self.active = [[] for _ in all_cores]
+ with open(self.infile, "r") as fh:
+ # parsing logic:
+ # - keep spinning through lines until see the cpu summary line
+ # (taken to indicate start of new record).
+ # - extract values for individual cores after the summary line,
+ # keeping track of seen cores until no more lines match 'cpu\d+'
+ # pattern.
+ # - For every core not seen in this record, pad zeros.
+ # - Loop
+ try:
+ while True:
+ line = fh.next()
+ if not line.startswith('cpu '):
+ continue
+
+ seen_cores = set([])
+ line = fh.next()
+ match = self.cpuid_regex.match(line)
+ while match:
+ cpu_id = int(match.group(1))
+ seen_cores.add(cpu_id)
+ times = map(int, line.split()[1:]) # first column is the cpu_id
+ self.total[cpu_id].append(sum(times))
+ self.active[cpu_id].append(sum(times) - times[self.idle_time_index])
+ line = fh.next()
+ match = self.cpuid_regex.match(line)
+
+ for unseen_core in all_cores - seen_cores:
+ self.total[unseen_core].append(0)
+ self.active[unseen_core].append(0)
+ except StopIteration: # EOF
+ pass
+
+ def calculate_core_utilization(self):
+ """Calculates CPU utilization"""
+ diff_active = [[] for _ in xrange(self.cores)]
+ diff_total = [[] for _ in xrange(self.cores)]
+ self.cpu_util = [[] for _ in xrange(self.cores)]
+ for i in xrange(self.cores):
+ for j in xrange(len(self.active[i]) - 1):
+ temp = self.active[i][j + 1] - self.active[i][j]
+ diff_active[i].append(temp)
+ diff_total[i].append(self.total[i][j + 1] - self.total[i][j])
+ if diff_total[i][j] == 0:
+ self.cpu_util[i].append(0)
+ else:
+ temp = float(diff_active[i][j]) / diff_total[i][j]
+ self.cpu_util[i].append(round((float(temp)) * 100, 2))
+
+ def generate_csv(self, context):
+ """ generates ``coreutil.csv``"""
+ self.output = [0 for _ in xrange(self.cores + 1)]
+ for i in range(len(self.cpu_util[0])):
+ count = 0
+ for j in xrange(len(self.cpu_util)):
+ if self.cpu_util[j][i] > round(float(self.threshold), 2):
+ count = count + 1
+ self.output[count] += 1
+ if self.cpu_util[0]:
+ scale_factor = round((float(1) / len(self.cpu_util[0])) * 100, 6)
+ else:
+ scale_factor = 0
+ for i in xrange(len(self.output)):
+ self.output[i] = self.output[i] * scale_factor
+ with open(self.outfile, 'a+') as tem:
+ writer = csv.writer(tem)
+ reader = csv.reader(tem)
+ if sum(1 for row in reader) == 0:
+ row = ['workload', 'iteration', '<threshold']
+ for i in xrange(1, self.cores + 1):
+ row.append('{}core'.format(i))
+ writer.writerow(row)
+ row = [context.result.workload.name, context.result.iteration]
+ row.extend(self.output)
+ writer.writerow(row)
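The per-sample arithmetic described in the ``Calculator`` docstring can be checked with a
standalone sketch (the two ``/proc/stat`` samples below are made up; values are in USER_HZ)::

    # Two consecutive readings for one core, columns as documented above.
    prev = [1585220, 7756, 1103883, 4977224, 552, 97, 10505, 0, 0, 0]
    curr = [1585720, 7756, 1104083, 4977824, 552, 97, 10505, 0, 0, 0]

    idle_time_index = 3  # 4th column: time spent in the idle task

    diff = [c - p for c, p in zip(curr, prev)]     # step 1: per-column deltas
    total = sum(diff)                              # 1300
    active = total - diff[idle_time_index]         # step 2: drop idle -> 700
    utilization = float(active) / total * 100      # step 3 -> ~53.85%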
diff --git a/wlauto/instrumentation/daq/__init__.py b/wlauto/instrumentation/daq/__init__.py
new file mode 100644
index 00000000..a0f5bbd1
--- /dev/null
+++ b/wlauto/instrumentation/daq/__init__.py
@@ -0,0 +1,221 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
+from __future__ import division
+import os
+import sys
+import csv
+from collections import OrderedDict
+
+from wlauto import Instrument, Parameter
+from wlauto.exceptions import ConfigError, InstrumentError
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.types import list_of_ints, list_of_strs
+
+daqpower_path = os.path.join(os.path.dirname(__file__), '..', '..', 'external', 'daq_server', 'src')
+sys.path.insert(0, daqpower_path)
+try:
+ import daqpower.client as daq # pylint: disable=F0401
+ from daqpower.config import DeviceConfiguration, ServerConfiguration, ConfigurationError # pylint: disable=F0401
+except ImportError, e:
+ daq, DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None, None
+ import_error_mesg = e.message
+sys.path.pop(0)
+
+
+UNITS = {
+ 'power': 'Watts',
+ 'voltage': 'Volts',
+}
+
+
+class Daq(Instrument):
+
+ name = 'daq'
+ description = """
+    Collects the power consumption of the target device's cores, as measured by a
+    National Instruments Data Acquisition (DAQ) device.
+
+ WA communicates with a DAQ device server running on a Windows machine
+ (Please refer to :ref:`daq_setup`) over a network. You must specify the IP
+ address and port the server is listening on in the config file as follows ::
+
+ daq_server_host = '10.1.197.176'
+ daq_server_port = 45677
+
+ These values will be output by the server when you run it on Windows.
+
+ You must also specify the values of resistors (in Ohms) across which the
+ voltages are measured (Please refer to :ref:`daq_setup`). The values should be
+ specified as a list with an entry for each resistor, e.g.::
+
+ daq_resistor_values = [0.005, 0.005]
+
+ In addition to this mandatory configuration, you can also optionally specify the
+ following::
+
+ :daq_labels: Labels to be used for ports. Defaults to ``'PORT_<pnum>'``, where
+ 'pnum' is the number of the port.
+ :daq_device_id: The ID under which the DAQ is registered with the driver.
+ Defaults to ``'Dev1'``.
+ :daq_v_range: Specifies the voltage range for the SOC voltage channel on the DAQ
+ (please refer to :ref:`daq_setup` for details). Defaults to ``2.5``.
+ :daq_dv_range: Specifies the voltage range for the resistor voltage channel on
+ the DAQ (please refer to :ref:`daq_setup` for details).
+ Defaults to ``0.2``.
+ :daq_sampling_rate: DAQ sampling rate. DAQ will take this many samples each
+                        second. Please note that this may be limited by your DAQ model
+                        and the number of ports you're measuring (again, see
+ :ref:`daq_setup`). Defaults to ``10000``.
+ :daq_channel_map: Represents mapping from logical AI channel number to physical
+ connector on the DAQ (varies between DAQ models). The default
+ assumes DAQ 6363 and similar with AI channels on connectors
+ 0-7 and 16-23.
+
+ """
+
+ parameters = [
+        Parameter('server_host', kind=str, default='localhost',
+                  description='The host address of the machine running the DAQ server with '
+                              'which the instrument communicates.'),
+        Parameter('server_port', kind=int, default=56788,
+                  description='The port on which the DAQ server is listening.'),
+ Parameter('device_id', kind=str, default='Dev1',
+ description='The ID under which the DAQ is registered with the driver.'),
+ Parameter('v_range', kind=float, default=2.5,
+ description='Specifies the voltage range for the SOC voltage channel on the DAQ '
+ '(please refer to :ref:`daq_setup` for details).'),
+ Parameter('dv_range', kind=float, default=0.2,
+ description='Specifies the voltage range for the resistor voltage channel on '
+ 'the DAQ (please refer to :ref:`daq_setup` for details).'),
+ Parameter('sampling_rate', kind=int, default=10000,
+ description='DAQ sampling rate. DAQ will take this many samples each '
+                              'second. Please note that this may be limited by your DAQ model '
+                              'and the number of ports you\'re measuring (again, see '
+ ':ref:`daq_setup`)'),
+ Parameter('resistor_values', kind=list, mandatory=True,
+ description='The values of resistors (in Ohms) across which the voltages are measured on '
+ 'each port.'),
+ Parameter('channel_map', kind=list_of_ints, default=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
+ description='Represents mapping from logical AI channel number to physical '
+ 'connector on the DAQ (varies between DAQ models). The default '
+ 'assumes DAQ 6363 and similar with AI channels on connectors '
+ '0-7 and 16-23.'),
+ Parameter('labels', kind=list_of_strs,
+                  description='List of port labels. If specified, the length of the list must match '
+ 'the length of ``resistor_values``. Defaults to "PORT_<pnum>", where '
+ '"pnum" is the number of the port.')
+ ]
+
+ def initialize(self, context):
+ devices = self._execute_command('list_devices')
+ if not devices:
+ raise InstrumentError('DAQ: server did not report any devices registered with the driver.')
+ self._results = OrderedDict()
+
+ def setup(self, context):
+ self.logger.debug('Initialising session.')
+ self._execute_command('configure', config=self.device_config)
+
+ def slow_start(self, context):
+ self.logger.debug('Starting collecting measurements.')
+ self._execute_command('start')
+
+ def slow_stop(self, context):
+ self.logger.debug('Stopping collecting measurements.')
+ self._execute_command('stop')
+
+ def update_result(self, context): # pylint: disable=R0914
+ self.logger.debug('Downloading data files.')
+ output_directory = _d(os.path.join(context.output_directory, 'daq'))
+ self._execute_command('get_data', output_directory=output_directory)
+ for entry in os.listdir(output_directory):
+ context.add_iteration_artifact('DAQ_{}'.format(os.path.splitext(entry)[0]),
+ path=os.path.join('daq', entry),
+ kind='data',
+                                           description='DAQ power measurements.')
+ port = os.path.splitext(entry)[0]
+ path = os.path.join(output_directory, entry)
+ key = (context.spec.id, context.workload.name, context.current_iteration)
+ if key not in self._results:
+ self._results[key] = {}
+ with open(path) as fh:
+ reader = csv.reader(fh)
+ metrics = reader.next()
+ data = [map(float, d) for d in zip(*list(reader))]
+ n = len(data[0])
+ means = [s / n for s in map(sum, data)]
+ for metric, value in zip(metrics, means):
+ metric_name = '{}_{}'.format(port, metric)
+ context.result.add_metric(metric_name, round(value, 3), UNITS[metric])
+ self._results[key][metric_name] = round(value, 3)
+
+ def teardown(self, context):
+ self.logger.debug('Terminating session.')
+ self._execute_command('close')
+
+ def validate(self):
+ if not daq:
+ raise ImportError(import_error_mesg)
+ self._results = None
+ if self.labels:
+ if not (len(self.labels) == len(self.resistor_values)): # pylint: disable=superfluous-parens
+ raise ConfigError('Number of DAQ port labels does not match the number of resistor values.')
+ else:
+ self.labels = ['PORT_{}'.format(i) for i, _ in enumerate(self.resistor_values)]
+ self.server_config = ServerConfiguration(host=self.server_host,
+ port=self.server_port)
+ self.device_config = DeviceConfiguration(device_id=self.device_id,
+ v_range=self.v_range,
+ dv_range=self.dv_range,
+ sampling_rate=self.sampling_rate,
+ resistor_values=self.resistor_values,
+ channel_map=self.channel_map,
+ labels=self.labels)
+ try:
+ self.server_config.validate()
+ self.device_config.validate()
+ except ConfigurationError, ex:
+ raise ConfigError('DAQ configuration: ' + ex.message) # Re-raise as a WA error
+
+ def before_overall_results_processing(self, context):
+ if self._results:
+ headers = ['id', 'workload', 'iteration']
+ metrics = sorted(self._results.iteritems().next()[1].keys())
+ headers += metrics
+ rows = [headers]
+ for key, value in self._results.iteritems():
+ rows.append(list(key) + [value[m] for m in metrics])
+
+ outfile = os.path.join(context.output_directory, 'daq_power.csv')
+ with open(outfile, 'wb') as fh:
+ writer = csv.writer(fh)
+ writer.writerows(rows)
+
+ def _execute_command(self, command, **kwargs):
+ # pylint: disable=E1101
+ result = daq.execute_command(self.server_config, command, **kwargs)
+ if result.status == daq.Status.OK:
+ pass # all good
+ elif result.status == daq.Status.OKISH:
+ self.logger.debug(result.message)
+ elif result.status == daq.Status.ERROR:
+ raise InstrumentError('DAQ: {}'.format(result.message))
+ else:
+ raise InstrumentError('DAQ: Unexpected result: {} - {}'.format(result.status, result.message))
+ return result.data
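A config sketch for enabling this instrument via the global aliases documented in the
description above (the host, port, and label values are illustrative, and the standard
``instrumentation`` list in ``config.py`` is assumed)::

    # config.py
    instrumentation = ['daq']

    daq_server_host = '10.1.197.176'
    daq_server_port = 45677
    daq_resistor_values = [0.005, 0.005]   # Ohms, one entry per measured port
    daq_labels = ['A57', 'A53']            # optional; must match resistor_values in length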
diff --git a/wlauto/instrumentation/delay/__init__.py b/wlauto/instrumentation/delay/__init__.py
new file mode 100644
index 00000000..e942520e
--- /dev/null
+++ b/wlauto/instrumentation/delay/__init__.py
@@ -0,0 +1,181 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#pylint: disable=W0613,E1101,E0203,W0201
+import time
+
+from wlauto import Instrument, Parameter
+from wlauto.exceptions import ConfigError, InstrumentError
+from wlauto.utils.types import boolean
+
+
+class DelayInstrument(Instrument):
+
+ name = 'delay'
+ description = """
+ This instrument introduces a delay before executing either an iteration
+ or all iterations for a spec.
+
+ The delay may be specified as either a fixed period or a temperature
+ threshold that must be reached.
+
+ Optionally, if an active cooling solution is employed to speed up temperature drop between
+ runs, it may be controlled using this instrument.
+
+ """
+
+ parameters = [
+ Parameter('temperature_file', default='/sys/devices/virtual/thermal/thermal_zone0/temp',
+ global_alias='thermal_temp_file',
+ description="""Full path to the sysfile on the device that contains the device's
+ temperature."""),
+ Parameter('temperature_timeout', kind=int, default=600,
+ global_alias='thermal_timeout',
+ description="""
+ The timeout after which the instrument will stop waiting even if the specified threshold
+ temperature is not reached. If this timeout is hit, then a warning will be logged stating
+ the actual temperature at which the timeout has ended.
+ """),
+ Parameter('temperature_poll_period', kind=int, default=5,
+ global_alias='thermal_sleep_time',
+ description="""How long to sleep (in seconds) between polling current device temperature."""),
+ Parameter('temperature_between_specs', kind=int, default=None,
+ global_alias='thermal_threshold_between_specs',
+ description="""
+ Temperature (in device-specific units) the device must cool down to before
+                      the next workload spec will be run.
+
+ .. note:: This cannot be specified at the same time as ``fixed_between_specs``
+
+ """),
+ Parameter('temperature_between_iterations', kind=int, default=None,
+ global_alias='thermal_threshold_between_iterations',
+ description="""
+ Temperature (in device-specific units) the device must cool down to before
+                      the next iteration will be run.
+
+ .. note:: This cannot be specified at the same time as ``fixed_between_iterations``
+
+ """),
+ Parameter('temperature_before_start', kind=int, default=None,
+ global_alias='thermal_threshold_before_start',
+ description="""
+ Temperature (in device-specific units) the device must cool down to just before
+ the actual workload execution (after setup has been performed).
+
+ .. note:: This cannot be specified at the same time as ``fixed_between_iterations``
+
+ """),
+ Parameter('fixed_between_specs', kind=int, default=None,
+ global_alias='fixed_delay_between_specs',
+ description="""
+ How long to sleep (in seconds) after all iterations for a workload spec have
+ executed.
+
+ .. note:: This cannot be specified at the same time as ``temperature_between_specs``
+
+ """),
+ Parameter('fixed_between_iterations', kind=int, default=None,
+ global_alias='fixed_delay_between_iterations',
+ description="""
+                      How long to sleep (in seconds) after each iteration of a workload spec has
+ executed.
+
+ .. note:: This cannot be specified at the same time as ``temperature_between_iterations``
+
+ """),
+ Parameter('active_cooling', kind=boolean, default=False,
+ global_alias='thermal_active_cooling',
+ description="""
+ This instrument supports an active cooling solution while waiting for the device temperature
+ to drop to the threshold. The solution involves an mbed controlling a fan. The mbed is signaled
+ over a serial port. If this solution is present in the setup, this should be set to ``True``.
+ """),
+ ]
+
+ def initialize(self, context):
+ if self.temperature_between_iterations == 0:
+ temp = self.device.get_sysfile_value(self.temperature_file, int)
+ self.logger.debug('Setting temperature threshold between iterations to {}'.format(temp))
+ self.temperature_between_iterations = temp
+ if self.temperature_between_specs == 0:
+ temp = self.device.get_sysfile_value(self.temperature_file, int)
+ self.logger.debug('Setting temperature threshold between workload specs to {}'.format(temp))
+ self.temperature_between_specs = temp
+
+ def slow_on_iteration_start(self, context):
+ if self.active_cooling:
+ self.device.stop_active_cooling()
+ if self.fixed_between_iterations:
+ self.logger.debug('Waiting for a fixed period after iteration...')
+ time.sleep(self.fixed_between_iterations)
+ elif self.temperature_between_iterations:
+ self.logger.debug('Waiting for temperature drop before iteration...')
+ self.wait_for_temperature(self.temperature_between_iterations)
+
+ def slow_on_spec_start(self, context):
+ if self.active_cooling:
+ self.device.stop_active_cooling()
+ if self.fixed_between_specs:
+ self.logger.debug('Waiting for a fixed period after spec execution...')
+ time.sleep(self.fixed_between_specs)
+ elif self.temperature_between_specs:
+ self.logger.debug('Waiting for temperature drop before spec execution...')
+ self.wait_for_temperature(self.temperature_between_specs)
+
+ def very_slow_start(self, context):
+ if self.active_cooling:
+ self.device.stop_active_cooling()
+ if self.temperature_before_start:
+ self.logger.debug('Waiting for temperature drop before commencing execution...')
+ self.wait_for_temperature(self.temperature_before_start)
+
+ def wait_for_temperature(self, temperature):
+ if self.active_cooling:
+ self.device.start_active_cooling()
+ self.do_wait_for_temperature(temperature)
+ self.device.stop_active_cooling()
+ else:
+ self.do_wait_for_temperature(temperature)
+
+ def do_wait_for_temperature(self, temperature):
+ reading = self.device.get_sysfile_value(self.temperature_file, int)
+ waiting_start_time = time.time()
+ while reading > temperature:
+ self.logger.debug('Device temperature: {}'.format(reading))
+ if time.time() - waiting_start_time > self.temperature_timeout:
+ self.logger.warning('Reached timeout; current temperature: {}'.format(reading))
+ break
+ time.sleep(self.temperature_poll_period)
+ reading = self.device.get_sysfile_value(self.temperature_file, int)
+
+ def validate(self):
+ if (self.temperature_between_specs is not None and
+ self.fixed_between_specs is not None):
+ raise ConfigError('Both fixed delay and thermal threshold specified for specs.')
+
+ if (self.temperature_between_iterations is not None and
+ self.fixed_between_iterations is not None):
+ raise ConfigError('Both fixed delay and thermal threshold specified for iterations.')
+
+ if not any([self.temperature_between_specs, self.fixed_between_specs, self.temperature_before_start,
+ self.temperature_between_iterations, self.fixed_between_iterations]):
+ raise ConfigError('delay instrument is enabled, but no delay is specified.')
+
+ if self.active_cooling and not self.device.has('active_cooling'):
+            message = 'Your device does not support active cooling. Did you configure it with an appropriate module?'
+ raise InstrumentError(message)
+
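A hedged config sketch combining the parameters above via their global aliases (values are
illustrative; note that ``validate()`` rejects mixing a fixed delay with a temperature
threshold for the same scope)::

    # config.py
    instrumentation = ['delay']

    thermal_threshold_between_specs = 65000   # temperature_between_specs
    thermal_threshold_before_start = 65000    # temperature_before_start
    thermal_sleep_time = 5                    # temperature_poll_period, in seconds
    thermal_active_cooling = True             # needs a device active_cooling module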
diff --git a/wlauto/instrumentation/dmesg/__init__.py b/wlauto/instrumentation/dmesg/__init__.py
new file mode 100644
index 00000000..2603d8a4
--- /dev/null
+++ b/wlauto/instrumentation/dmesg/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import os
+
+from wlauto import Instrument, Parameter
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+
+
+class DmesgInstrument(Instrument):
+ # pylint: disable=no-member,attribute-defined-outside-init
+ """
+    Collects dmesg output before and after the run.
+
+ """
+
+ name = 'dmesg'
+
+ parameters = [
+ Parameter('loglevel', kind=int, allowed_values=range(8),
+ description='Set loglevel for console output.')
+ ]
+
+ loglevel_file = '/proc/sys/kernel/printk'
+
+ def setup(self, context):
+ if self.loglevel:
+ self.old_loglevel = self.device.get_sysfile_value(self.loglevel_file)
+ self.device.set_sysfile_value(self.loglevel_file, self.loglevel, verify=False)
+ self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before'))
+ self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after'))
+
+ def slow_start(self, context):
+ with open(self.before_file, 'w') as wfh:
+ wfh.write(self.device.execute('dmesg'))
+ context.add_artifact('dmesg_before', self.before_file, kind='data')
+ if self.device.is_rooted:
+ self.device.execute('dmesg -c', as_root=True)
+
+ def slow_stop(self, context):
+ with open(self.after_file, 'w') as wfh:
+ wfh.write(self.device.execute('dmesg'))
+ context.add_artifact('dmesg_after', self.after_file, kind='data')
+
+ def teardown(self, context): # pylint: disable=unused-argument
+ if self.loglevel:
+ self.device.set_sysfile_value(self.loglevel_file, self.old_loglevel, verify=False)
+
+
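A standalone sketch of the before/after capture this instrument performs, run on a local
Linux host for illustration (the instrument itself issues the same ``dmesg`` command over
the device connection)::

    import subprocess

    def dmesg():
        # Snapshot the current kernel ring buffer.
        return subprocess.check_output(['dmesg'])

    before = dmesg()      # taken just before the workload starts
    # ... workload runs here ...
    after = dmesg()       # taken just after the workload stops

    with open('dmesg_before', 'w') as wfh:
        wfh.write(before)
    with open('dmesg_after', 'w') as wfh:
        wfh.write(after)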
diff --git a/wlauto/instrumentation/energy_probe/__init__.py b/wlauto/instrumentation/energy_probe/__init__.py
new file mode 100644
index 00000000..2a5466c8
--- /dev/null
+++ b/wlauto/instrumentation/energy_probe/__init__.py
@@ -0,0 +1,145 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,access-member-before-definition,attribute-defined-outside-init
+import os
+import subprocess
+import signal
+import struct
+import csv
+try:
+ import pandas
+except ImportError:
+ pandas = None
+
+from wlauto import Instrument, Parameter, Executable
+from wlauto.exceptions import InstrumentError, ConfigError
+from wlauto.utils.types import list_of_numbers
+
+
+class EnergyProbe(Instrument):
+
+ name = 'energy_probe'
+ description = """Collects power traces using the ARM energy probe.
+
+    This instrument requires the ``caiman`` utility to be installed on the Workload Automation
+    host and to be in PATH. Caiman is part of DS-5 and should be in ``/path/to/DS-5/bin/``.
+ Energy probe can simultaneously collect energy from up to 3 power rails.
+
+    To connect the energy probe to a rail, connect the white wire to the pin that is closer to the
+ Voltage source and the black wire to the pin that is closer to the load (the SoC or the device
+ you are probing). Between the pins there should be a shunt resistor of known resistance in the
+ range of 5 to 20 mOhm. The resistance of the shunt resistors is a mandatory parameter
+ ``resistor_values``.
+
+ .. note:: This instrument can process results a lot faster if python pandas is installed.
+ """
+
+ parameters = [
+        Parameter('resistor_values', kind=list_of_numbers, default=[],
+                  description="""The values of the shunt resistors, in Ohms. This is a mandatory parameter."""),
+ Parameter('labels', kind=list, default=[],
+ description="""Meaningful labels for each of the monitored rails."""),
+ ]
+
+ MAX_CHANNELS = 3
+
+ def __init__(self, device, **kwargs):
+ super(EnergyProbe, self).__init__(device, **kwargs)
+ self.attributes_per_sample = 3
+ self.bytes_per_sample = self.attributes_per_sample * 4
+ self.attributes = ['power', 'voltage', 'current']
+ for i, val in enumerate(self.resistor_values):
+ self.resistor_values[i] = int(1000 * float(val))
+
+ def validate(self):
+ if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
+ raise InstrumentError('caiman not in PATH. Cannot enable energy probe')
+ if not self.resistor_values:
+ raise ConfigError('At least one resistor value must be specified')
+ if len(self.resistor_values) > self.MAX_CHANNELS:
+            raise ConfigError('{} channels were specified, but Energy Probe supports up to {}'
+ .format(len(self.resistor_values), self.MAX_CHANNELS))
+ if pandas is None:
+ self.logger.warning("pandas package will significantly speed up this instrument")
+ self.logger.warning("to install it try: pip install pandas")
+
+ def setup(self, context):
+ if not self.labels:
+ self.labels = ["PORT_{}".format(channel) for channel, _ in enumerate(self.resistor_values)]
+ self.output_directory = os.path.join(context.output_directory, 'energy_probe')
+ rstring = ""
+ for i, rval in enumerate(self.resistor_values):
+ rstring += '-r {}:{} '.format(i, rval)
+ self.command = 'caiman -l {} {}'.format(rstring, self.output_directory)
+ os.makedirs(self.output_directory)
+
+ def start(self, context):
+ self.logger.debug(self.command)
+ self.caiman = subprocess.Popen(self.command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ preexec_fn=os.setpgrp,
+ shell=True)
+
+ def stop(self, context):
+ os.killpg(self.caiman.pid, signal.SIGTERM)
+
+ def update_result(self, context): # pylint: disable=too-many-locals
+ num_of_channels = len(self.resistor_values)
+ processed_data = [[] for _ in xrange(num_of_channels)]
+ filenames = [os.path.join(self.output_directory, '{}.csv'.format(label)) for label in self.labels]
+ struct_format = '{}I'.format(num_of_channels * self.attributes_per_sample)
+ not_a_full_row_seen = False
+ with open(os.path.join(self.output_directory, "0000000000"), "rb") as bfile:
+ while True:
+ data = bfile.read(num_of_channels * self.bytes_per_sample)
+ if data == '':
+ break
+ try:
+ unpacked_data = struct.unpack(struct_format, data)
+ except struct.error:
+ if not_a_full_row_seen:
+                        self.logger.warn('possibly misaligned caiman raw data, row contained {} bytes'.format(len(data)))
+ continue
+ else:
+ not_a_full_row_seen = True
+ for i in xrange(num_of_channels):
+ index = i * self.attributes_per_sample
+ processed_data[i].append({attr: val for attr, val in
+ zip(self.attributes, unpacked_data[index:index + self.attributes_per_sample])})
+ for i, path in enumerate(filenames):
+ with open(path, 'w') as f:
+ if pandas is not None:
+ self._pandas_produce_csv(processed_data[i], f)
+ else:
+ self._slow_produce_csv(processed_data[i], f)
+
+ # pylint: disable=R0201
+ def _pandas_produce_csv(self, data, f):
+ dframe = pandas.DataFrame(data)
+ dframe = dframe / 1000.0
+ dframe.to_csv(f)
+
+ def _slow_produce_csv(self, data, f):
+ new_data = []
+ for entry in data:
+ new_data.append({key: val / 1000.0 for key, val in entry.items()})
+ writer = csv.DictWriter(f, self.attributes)
+ writer.writeheader()
+ writer.writerows(new_data)
+
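The ``caiman`` command line is assembled from ``resistor_values``, which ``__init__``
converts from Ohms to milliohms. A sketch of the resulting invocation for two 20 mOhm
shunts (the output path is illustrative)::

    resistor_values = [0.02, 0.02]                 # Ohms, as configured
    milliohms = [int(1000 * float(v)) for v in resistor_values]
    rstring = ''.join('-r {}:{} '.format(i, r) for i, r in enumerate(milliohms))
    command = 'caiman -l {} {}'.format(rstring, 'output/energy_probe')
    # -> caiman -l -r 0:20 -r 1:20  output/energy_probe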
diff --git a/wlauto/instrumentation/fps/__init__.py b/wlauto/instrumentation/fps/__init__.py
new file mode 100644
index 00000000..ecdd1bb6
--- /dev/null
+++ b/wlauto/instrumentation/fps/__init__.py
@@ -0,0 +1,298 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+import os
+import sys
+import time
+import csv
+import shutil
+import threading
+import errno
+import tempfile
+
+from distutils.version import LooseVersion
+
+
+from wlauto import Instrument, Parameter, IterationResult
+from wlauto.instrumentation import instrument_is_installed
+from wlauto.exceptions import (InstrumentError, WorkerThreadError, ConfigError,
+ DeviceNotRespondingError, TimeoutError)
+from wlauto.utils.types import boolean, numeric
+
+try:
+ import pandas as pd
+except ImportError:
+ pd = None
+
+
+VSYNC_INTERVAL = 16666667
+EPSYLON = 0.0001
+
+
+class FpsInstrument(Instrument):
+
+ name = 'fps'
+ description = """
+ Measures Frames Per Second (FPS) and associated metrics for a workload's main View.
+
+ .. note:: This instrument depends on pandas Python library (which is not part of standard
+ WA dependencies), so you will need to install that first, before you can use it.
+
+    The view is specified by the workload as its ``view`` attribute. This defaults
+    to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
+    workloads (as FPS measurement usually does not make sense for them).
+ Individual workloads may override this.
+
+ This instrument adds four metrics to the results:
+
+ :FPS: Frames Per Second. This is the frame rate of the workload.
+ :frames: The total number of frames rendered during the execution of
+ the workload.
+    :janks: The number of "janks" that occurred during execution of the
+ workload. Janks are sudden shifts in frame rate. They result
+ in a "stuttery" UI. See http://jankfree.org/jank-busters-io
+ :not_at_vsync: The number of frames that did not render in a single
+ vsync cycle.
+
+ """
+
+ parameters = [
+ Parameter('drop_threshold', kind=numeric, default=5,
+ description='Data points below this FPS will be dropped as they '
+ 'do not constitute "real" gameplay. The assumption '
+ 'being that while actually running, the FPS in the '
+ 'game will not drop below X frames per second, '
+ 'except on loading screens, menus, etc, which '
+ 'should not contribute to FPS calculation. '),
+ Parameter('keep_raw', kind=boolean, default=False,
+ description='If set to True, this will keep the raw dumpsys output '
+                              'in the results directory (this is mainly used for debugging). '
+                              'Note: frames.csv with the collected frames data will always be '
+ 'generated regardless of this setting.'),
+ Parameter('crash_check', kind=boolean, default=True,
+ description="""
+                  Specifies whether the instrument should check for crashed content by examining
+                  frame data. If this is set, the ``execution_time`` instrument must also be installed.
+                  The check is performed by using the measured FPS and execution time to estimate the
+                  expected frame count and comparing that against the measured frame count. If the
+                  ratio of measured/expected is too low, then it is assumed that the content crashed
+                  part way through the run. What is "too low" is determined by ``crash_threshold``.
+
+ .. note:: This is not 100\% fool-proof. If the crash occurs sufficiently close to
+ workload's termination, it may not be detected. If this is expected, the
+ threshold may be adjusted up to compensate.
+ """),
+ Parameter('crash_threshold', kind=float, default=0.7,
+ description="""
+                  Specifies the threshold used to decide whether a measured/expected frame ratio indicates
+                  a content crash. E.g. with a value of ``0.75``, if the number of actual frames counted is
+                  more than a quarter lower than expected, it will be treated as a content crash.
+ """),
+ ]
+
+ clear_command = 'dumpsys SurfaceFlinger --latency-clear '
+
+ def __init__(self, device, **kwargs):
+ super(FpsInstrument, self).__init__(device, **kwargs)
+ self.collector = None
+ self.outfile = None
+ self.is_enabled = True
+
+ def validate(self):
+ if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
+ message = ('fps instrument requires pandas Python package (version 0.13.1 or higher) to be installed.\n'
+ 'You can install it with pip, e.g. "sudo pip install pandas"')
+ raise InstrumentError(message)
+ if self.crash_check and not instrument_is_installed('execution_time'):
+ raise ConfigError('execution_time instrument must be installed in order to check for content crash.')
+
+ def setup(self, context):
+ workload = context.workload
+ if hasattr(workload, 'view'):
+ self.outfile = os.path.join(context.output_directory, 'frames.csv')
+ self.collector = LatencyCollector(self.outfile, self.device, workload.view or '', self.keep_raw, self.logger)
+ self.device.execute(self.clear_command)
+ else:
+ self.logger.debug('Workload does not contain a view; disabling...')
+ self.is_enabled = False
+
+ def start(self, context):
+ if self.is_enabled:
+ self.logger.debug('Starting SurfaceFlinger collection...')
+ self.collector.start()
+
+ def stop(self, context):
+ if self.is_enabled and self.collector.is_alive():
+ self.logger.debug('Stopping SurfaceFlinger collection...')
+ self.collector.stop()
+
+ def update_result(self, context):
+ if self.is_enabled:
+ data = pd.read_csv(self.outfile)
+ if not data.empty: # pylint: disable=maybe-no-member
+ self._update_stats(context, data)
+ else:
+ context.result.add_metric('FPS', float('nan'))
+ context.result.add_metric('frame_count', 0)
+ context.result.add_metric('janks', 0)
+ context.result.add_metric('not_at_vsync', 0)
+
+ def slow_update_result(self, context):
+ result = context.result
+ if result.has_metric('execution_time'):
+ self.logger.debug('Checking for crashed content.')
+ exec_time = result['execution_time'].value
+ fps = result['FPS'].value
+ frames = result['frame_count'].value
+ if all([exec_time, fps, frames]):
+ expected_frames = fps * exec_time
+ ratio = frames / expected_frames
+ self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
+ if ratio < self.crash_threshold:
+ self.logger.error('Content for {} appears to have crashed.'.format(context.spec.label))
+ result.status = IterationResult.FAILED
+ result.add_event('Content crash detected (actual/expected frames: {:.2}).'.format(ratio))
+
+ def _update_stats(self, context, data):
+ vsync_interval = self.collector.refresh_period
+ actual_present_time_deltas = (data.actual_present_time - data.actual_present_time.shift()).drop(0) # pylint: disable=E1103
+ vsyncs_to_compose = (actual_present_time_deltas / vsync_interval).apply(lambda x: int(round(x, 0)))
+ # drop values lower than drop_threshold FPS as real in-game frame
+ # rate is unlikely to drop below that (except on loading screens
+ # etc, which should not be factored in frame rate calculation).
+ keep_filter = (1.0 / (vsyncs_to_compose * (vsync_interval / 1e9))) > self.drop_threshold
+ filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
+ if not filtered_vsyncs_to_compose.empty:
+ total_vsyncs = filtered_vsyncs_to_compose.sum()
+ if total_vsyncs:
+ frame_count = filtered_vsyncs_to_compose.size
+ fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
+ context.result.add_metric('FPS', fps)
+ context.result.add_metric('frame_count', frame_count)
+ else:
+ context.result.add_metric('FPS', float('nan'))
+ context.result.add_metric('frame_count', 0)
+
+ vtc_deltas = filtered_vsyncs_to_compose - filtered_vsyncs_to_compose.shift()
+ vtc_deltas.index = range(0, vtc_deltas.size)
+ vtc_deltas = vtc_deltas.drop(0).abs()
+ janks = vtc_deltas.apply(lambda x: (x > EPSYLON) and 1 or 0).sum()
+ not_at_vsync = vsyncs_to_compose.apply(lambda x: (abs(x - 1.0) > EPSYLON) and 1 or 0).sum()
+ context.result.add_metric('janks', janks)
+ context.result.add_metric('not_at_vsync', not_at_vsync)
+ else: # no filtered_vsyncs_to_compose
+ context.result.add_metric('FPS', float('nan'))
+ context.result.add_metric('frame_count', 0)
+ context.result.add_metric('janks', 0)
+ context.result.add_metric('not_at_vsync', 0)
+
+
+class LatencyCollector(threading.Thread):
+
+ # Note: the size of the frames buffer for a particular surface is defined
+ # by NUM_FRAME_RECORDS inside android/services/surfaceflinger/FrameTracker.h.
+ # At the time of writing, this was hard-coded to 128. So at 60 fps
+ # (and there is no reason to go above that, as it matches vsync rate
+ # on pretty much all phones), there is just over 2 seconds' worth of
+ # frames in there. Hence the sleep time of 2 seconds between dumps.
+ #command_template = 'while (true); do dumpsys SurfaceFlinger --latency {}; sleep 2; done'
+ command_template = 'dumpsys SurfaceFlinger --latency {}'
+
+ def __init__(self, outfile, device, activity, keep_raw, logger):
+ super(LatencyCollector, self).__init__()
+ self.outfile = outfile
+ self.device = device
+ self.command = self.command_template.format(activity)
+ self.keep_raw = keep_raw
+ self.logger = logger
+ self.stop_signal = threading.Event()
+ self.frames = []
+ self.last_ready_time = 0
+ self.refresh_period = VSYNC_INTERVAL
+ self.drop_threshold = self.refresh_period * 1000
+ self.exc = None
+ self.unresponsive_count = 0
+
+ def run(self):
+ try:
+ self.logger.debug('SurfaceFlinger collection started.')
+ self.stop_signal.clear()
+ fd, temp_file = tempfile.mkstemp()
+ self.logger.debug('temp file: {}'.format(temp_file))
+ wfh = os.fdopen(fd, 'wb')
+ try:
+ while not self.stop_signal.is_set():
+ wfh.write(self.device.execute(self.command))
+ time.sleep(2)
+ finally:
+ wfh.close()
+ # TODO: this can happen after the run during results processing
+ with open(temp_file) as fh:
+ text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
+ for line in text.split('\n'):
+ line = line.strip()
+ if line:
+ self._process_trace_line(line)
+ if self.keep_raw:
+ raw_file = os.path.join(os.path.dirname(self.outfile), 'surfaceflinger.raw')
+ shutil.copy(temp_file, raw_file)
+ os.unlink(temp_file)
+ except (DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
+ raise
+ except Exception, e: # pylint: disable=W0703
+ self.logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
+ self.exc = WorkerThreadError(self.name, sys.exc_info())
+ self.logger.debug('SurfaceFlinger collection stopped.')
+
+ with open(self.outfile, 'w') as wfh:
+ writer = csv.writer(wfh)
+ writer.writerow(['desired_present_time', 'actual_present_time', 'frame_ready_time'])
+ writer.writerows(self.frames)
+ self.logger.debug('Frames data written.')
+
+ def stop(self):
+ self.stop_signal.set()
+ self.join()
+ if self.unresponsive_count:
+            message = 'SurfaceFlinger was unresponsive {} times.'.format(self.unresponsive_count)
+ if self.unresponsive_count > 10:
+ self.logger.warning(message)
+ else:
+ self.logger.debug(message)
+ if self.exc:
+ raise self.exc # pylint: disable=E0702
+        self.logger.debug('FPS collection complete.')
+
+ def _process_trace_line(self, line):
+ parts = line.split()
+ if len(parts) == 3:
+ desired_present_time, actual_present_time, frame_ready_time = map(int, parts)
+ if frame_ready_time <= self.last_ready_time:
+ return # duplicate frame
+ if (frame_ready_time - desired_present_time) > self.drop_threshold:
+ self.logger.debug('Dropping bogus frame {}.'.format(line))
+ return # bogus data
+ self.last_ready_time = frame_ready_time
+ self.frames.append((desired_present_time, actual_present_time, frame_ready_time))
+ elif len(parts) == 1:
+ self.refresh_period = int(parts[0])
+ self.drop_threshold = self.refresh_period * 10
+ elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
+ self.unresponsive_count += 1
+ else:
+ self.logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
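The core metrics derived in ``_update_stats`` follow from simple vsync arithmetic. A
standalone worked sketch (the per-frame vsync counts are made up; 16666667 ns matches
``VSYNC_INTERVAL``, i.e. a 60 Hz refresh)::

    vsync_interval = 16666667                  # ns per vsync period
    vsyncs_to_compose = [1, 1, 2, 1, 1]        # vsyncs taken to compose each frame

    frame_count = len(vsyncs_to_compose)       # 5 frames rendered...
    total_vsyncs = sum(vsyncs_to_compose)      # ...across 6 vsync periods
    fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)   # -> ~50.0

    # A "jank" is a sudden shift in the per-frame vsync count.
    deltas = [abs(b - a) for a, b in zip(vsyncs_to_compose, vsyncs_to_compose[1:])]
    janks = sum(1 for d in deltas if d > 0.0001)                # -> 2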
diff --git a/wlauto/instrumentation/hwmon/__init__.py b/wlauto/instrumentation/hwmon/__init__.py
new file mode 100644
index 00000000..598564f0
--- /dev/null
+++ b/wlauto/instrumentation/hwmon/__init__.py
@@ -0,0 +1,120 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+from collections import OrderedDict
+
+from wlauto import Parameter, Instrument
+from wlauto.exceptions import InstrumentError, ConfigError
+from wlauto.utils.hwmon import discover_sensors
+from wlauto.utils.types import list_of_strs
+
+
+# sensor_kind: (report_type, units, conversion)
+HWMON_SENSORS = {
+ 'energy': ('diff', 'Joules', lambda x: x / 10 ** 6),
+ 'temp': ('before/after', 'Celsius', lambda x: x / 10 ** 3),
+}
+
+HWMON_SENSOR_PRIORITIES = ['energy', 'temp']
+
+
+class HwmonInstrument(Instrument):
+
+ name = 'hwmon'
+ description = """
+ Hardware Monitor (hwmon) is a generic Linux kernel subsystem,
+ providing access to hardware monitoring components like temperature or
+ voltage/current sensors.
+
+ The following web page has more information:
+
+ http://blogs.arm.com/software-enablement/925-linux-hwmon-power-management-and-arm-ds-5-streamline/
+
+ You can specify which sensors HwmonInstrument looks for by specifying
+ hwmon_sensors in your config.py, e.g. ::
+
+ hwmon_sensors = ['energy', 'temp']
+
+    If this setting is not specified, it will look for all the sensors it knows about.
+    The currently valid values are:
+
+ :energy: Collect energy measurements and report energy consumed
+ during run execution (the diff of before and after readings)
+ in Joules.
+ :temp: Collect temperature measurements and report the before and
+ after readings in degrees Celsius.
+
+ """
+
+ parameters = [
+ Parameter('sensors', kind=list_of_strs, default=['energy', 'temp'],
+                  description='The kinds of sensors the hwmon instrument will look for.')
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(HwmonInstrument, self).__init__(device, **kwargs)
+
+ if self.sensors:
+ self.sensor_kinds = {}
+ for kind in self.sensors:
+ if kind in HWMON_SENSORS:
+ self.sensor_kinds[kind] = HWMON_SENSORS[kind]
+ else:
+ message = 'Unexpected sensor type: {}; must be in {}'.format(kind, HWMON_SENSORS.keys())
+ raise ConfigError(message)
+ else:
+ self.sensor_kinds = HWMON_SENSORS
+
+ self.sensors = []
+
+ def setup(self, context):
+ self.sensors = []
+ self.logger.debug('Searching for HWMON sensors.')
+ discovered_sensors = discover_sensors(self.device, self.sensor_kinds.keys())
+ for sensor in sorted(discovered_sensors, key=lambda s: HWMON_SENSOR_PRIORITIES.index(s.kind)):
+ self.logger.debug('Adding {}'.format(sensor.filepath))
+ self.sensors.append(sensor)
+ for sensor in self.sensors:
+ sensor.clear_readings()
+
+ def fast_start(self, context):
+ for sensor in reversed(self.sensors):
+ sensor.take_reading()
+
+ def fast_stop(self, context):
+ for sensor in self.sensors:
+ sensor.take_reading()
+
+ def update_result(self, context):
+ for sensor in self.sensors:
+ try:
+ report_type, units, conversion = HWMON_SENSORS[sensor.kind]
+ if report_type == 'diff':
+ before, after = sensor.readings
+ diff = conversion(after - before)
+ context.result.add_metric(sensor.label, diff, units)
+ elif report_type == 'before/after':
+ before, after = sensor.readings
+ context.result.add_metric(sensor.label + ' before', conversion(before), units)
+ context.result.add_metric(sensor.label + ' after', conversion(after), units)
+ else:
+ raise InstrumentError('Unexpected report_type: {}'.format(report_type))
+ except ValueError, e:
+ self.logger.error('Could not collect all {} readings for {}'.format(sensor.kind, sensor.label))
+ self.logger.error('Got: {}'.format(e))
+
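A worked sketch of the two report types defined in ``HWMON_SENSORS`` (the raw readings are
made up; energy sysfs values are taken to be in microjoules and temperatures in millidegrees,
matching the conversions above)::

    from __future__ import division

    # 'diff' report: energy consumed over the run, converted to Joules.
    before, after = 1200000, 4200000
    energy_joules = (after - before) / 10 ** 6      # -> 3.0 J

    # 'before/after' report: temperatures converted to degrees Celsius.
    before, after = 41250, 52500
    temp_before = before / 10 ** 3                  # -> 41.25 C
    temp_after = after / 10 ** 3                    # -> 52.5 C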
diff --git a/wlauto/instrumentation/juno_energy/__init__.py b/wlauto/instrumentation/juno_energy/__init__.py
new file mode 100644
index 00000000..4c1a4a4c
--- /dev/null
+++ b/wlauto/instrumentation/juno_energy/__init__.py
@@ -0,0 +1,77 @@
+# Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,W0201
+import os
+import csv
+import time
+import threading
+import logging
+from operator import itemgetter
+
+from wlauto import Instrument, File, Parameter
+from wlauto.exceptions import InstrumentError
+
+
+class JunoEnergy(Instrument):
+
+ name = 'juno_energy'
+ description = """
+ Collects internal energy meter measurements from Juno development board.
+
+ This instrument was created because (at the time of creation) Juno's energy
+ meter measurements aren't exposed through HWMON or similar standardized mechanism,
+ necessitating a dedicated instrument to access them.
+
+ This instrument, and the ``readenergy`` executable it relies on are very much tied
+ to the Juno platform and are not expected to work on other boards.
+
+ """
+
+ parameters = [
+ Parameter('period', kind=float, default=0.1,
+                  description='Specifies the time, in seconds, between polling energy counters.'),
+ ]
+
+ def on_run_init(self, context):
+ local_file = context.resolver.get(File(self, 'readenergy'))
+ self.device.killall('readenergy', as_root=True)
+ self.readenergy = self.device.install(local_file)
+
+ def setup(self, context):
+ self.host_output_file = os.path.join(context.output_directory, 'energy.csv')
+ self.device_output_file = self.device.path.join(self.device.working_directory, 'energy.csv')
+ self.command = '{} -o {}'.format(self.readenergy, self.device_output_file)
+ self.device.killall('readenergy', as_root=True)
+
+ def start(self, context):
+ self.device.kick_off(self.command)
+
+ def stop(self, context):
+ self.device.killall('readenergy', signal='TERM', as_root=True)
+
+ def update_result(self, context):
+ self.device.pull_file(self.device_output_file, self.host_output_file)
+ context.add_artifact('junoenergy', self.host_output_file, 'data')
+
+    def teardown(self, context):
+ self.device.delete_file(self.device_output_file)
+
+ def validate(self):
+ if self.device.name.lower() != 'juno':
+ message = 'juno_energy instrument is only supported on juno devices; found {}'
+ raise InstrumentError(message.format(self.device.name))
+
diff --git a/wlauto/instrumentation/juno_energy/readenergy b/wlauto/instrumentation/juno_energy/readenergy
new file mode 100755
index 00000000..c26991c2
--- /dev/null
+++ b/wlauto/instrumentation/juno_energy/readenergy
Binary files differ
diff --git a/wlauto/instrumentation/misc/__init__.py b/wlauto/instrumentation/misc/__init__.py
new file mode 100644
index 00000000..6fc55de9
--- /dev/null
+++ b/wlauto/instrumentation/misc/__init__.py
@@ -0,0 +1,365 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,no-member,attribute-defined-outside-init
+"""
+
+Some "standard" instruments to collect additional info about workload execution.
+
+.. note:: The run() method of a Workload may perform some "boilerplate" as well as
+ the actual execution of the workload (e.g. it may contain UI automation
+ needed to start the workload). This "boilerplate" execution will also
+    be measured by these instruments. As such, they are not suitable for collecting
+    precise data about specific operations.
+"""
+import os
+import re
+import logging
+import time
+import tarfile
+from itertools import izip, izip_longest
+from subprocess import CalledProcessError
+
+from wlauto import Instrument, Parameter
+from wlauto.core import signal
+from wlauto.exceptions import DeviceError
+from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.misc import ensure_directory_exists as _d
+from wlauto.utils.android import ApkInfo
+from wlauto.utils.types import list_of_strings
+
+
+logger = logging.getLogger(__name__)
+
+
+class SysfsExtractor(Instrument):
+
+ name = 'sysfs_extractor'
+ description = """
+    Collects the contents of a set of directories, before and after workload execution,
+    and diffs the result.
+
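+    For example, to snapshot and diff the cpufreq sysfs nodes around each workload
+    (a sketch -- the path shown is illustrative)::
+
+        instrumentation = ['sysfs_extractor']
+        sysfs_extract_dirs = ['/sys/devices/system/cpu/cpu0/cpufreq']
+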
+ """
+
+ mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
+ extract_timeout = 30
+ tarname = 'sysfs.tar.gz'
+
+ parameters = [
+ Parameter('paths', kind=list_of_strings, mandatory=True,
+ description="""A list of paths to be pulled from the device. These could be directories
+ as well as files.""",
+ global_alias='sysfs_extract_dirs'),
+ Parameter('tmpfs_mount_point', default=None,
+ description="""Mount point for tmpfs partition used to store snapshots of paths."""),
+ Parameter('tmpfs_size', default='32m',
+                  description="""Size of the tmpfs partition."""),
+ ]
+
+ def initialize(self, context):
+ if self.device.is_rooted:
+ self.on_device_before = self.device.path.join(self.tmpfs_mount_point, 'before')
+ self.on_device_after = self.device.path.join(self.tmpfs_mount_point, 'after')
+
+ if not self.device.file_exists(self.tmpfs_mount_point):
+ self.device.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True)
+ self.device.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point),
+ as_root=True)
+
+ def setup(self, context):
+ self.before_dirs = [
+ _d(os.path.join(context.output_directory, 'before', self._local_dir(d)))
+ for d in self.paths
+ ]
+ self.after_dirs = [
+ _d(os.path.join(context.output_directory, 'after', self._local_dir(d)))
+ for d in self.paths
+ ]
+ self.diff_dirs = [
+ _d(os.path.join(context.output_directory, 'diff', self._local_dir(d)))
+ for d in self.paths
+ ]
+
+ if self.device.is_rooted:
+ for d in self.paths:
+ before_dir = self.device.path.join(self.on_device_before,
+ self.device.path.dirname(as_relative(d)))
+ after_dir = self.device.path.join(self.on_device_after,
+ self.device.path.dirname(as_relative(d)))
+ if self.device.file_exists(before_dir):
+ self.device.execute('rm -rf {}'.format(before_dir), as_root=True)
+ self.device.execute('mkdir -p {}'.format(before_dir), as_root=True)
+ if self.device.file_exists(after_dir):
+ self.device.execute('rm -rf {}'.format(after_dir), as_root=True)
+ self.device.execute('mkdir -p {}'.format(after_dir), as_root=True)
+
+ def slow_start(self, context):
+ if self.device.is_rooted:
+ for d in self.paths:
+ dest_dir = self.device.path.join(self.on_device_before, as_relative(d))
+ if '*' in dest_dir:
+ dest_dir = self.device.path.dirname(dest_dir)
+ self.device.execute('busybox cp -Hr {} {}'.format(d, dest_dir),
+ as_root=True, check_exit_code=False)
+ else: # not rooted
+ for dev_dir, before_dir in zip(self.paths, self.before_dirs):
+ self.device.pull_file(dev_dir, before_dir)
+
+ def slow_stop(self, context):
+ if self.device.is_rooted:
+ for d in self.paths:
+ dest_dir = self.device.path.join(self.on_device_after, as_relative(d))
+ if '*' in dest_dir:
+ dest_dir = self.device.path.dirname(dest_dir)
+ self.device.execute('busybox cp -Hr {} {}'.format(d, dest_dir),
+ as_root=True, check_exit_code=False)
+ else: # not rooted
+ for dev_dir, after_dir in zip(self.paths, self.after_dirs):
+ self.device.pull_file(dev_dir, after_dir)
+
+ def update_result(self, context):
+ if self.device.is_rooted:
+ on_device_tarball = self.device.path.join(self.device.working_directory, self.tarname)
+            on_host_tarball = os.path.join(context.output_directory, self.tarname)  # a host-side path
+ self.device.execute('busybox tar czf {} -C {} .'.format(on_device_tarball, self.tmpfs_mount_point),
+ as_root=True)
+ self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
+ self.device.pull_file(on_device_tarball, on_host_tarball)
+ with tarfile.open(on_host_tarball, 'r:gz') as tf:
+ tf.extractall(context.output_directory)
+ self.device.delete_file(on_device_tarball)
+ os.remove(on_host_tarball)
+
+ for after_dir in self.after_dirs:
+ if not os.listdir(after_dir):
+ self.logger.error('sysfs files were not pulled from the device.')
+ return
+ for diff_dir, before_dir, after_dir in zip(self.diff_dirs, self.before_dirs, self.after_dirs):
+ _diff_sysfs_dirs(before_dir, after_dir, diff_dir)
+
+ def teardown(self, context):
+ self._one_time_setup_done = []
+
+ def finalize(self, context):
+ if self.device.is_rooted:
+ try:
+ self.device.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
+ except (DeviceError, CalledProcessError):
+                # assume it is a directory but not a mount point
+ pass
+ self.device.execute('rm -rf {}'.format(self.tmpfs_mount_point), as_root=True)
+
+ def validate(self):
+ if not self.tmpfs_mount_point: # pylint: disable=access-member-before-definition
+ self.tmpfs_mount_point = self.device.path.join(self.device.working_directory, 'temp-fs')
+
+ def _local_dir(self, directory):
+ return os.path.dirname(as_relative(directory).replace(self.device.path.sep, os.sep))
+
+
+class ExecutionTimeInstrument(Instrument):
+
+ name = 'execution_time'
+ description = """
+    Measure how long it took to execute the run() method of a Workload.
+
+ """
+
+ priority = 15
+
+ def __init__(self, device, **kwargs):
+ super(ExecutionTimeInstrument, self).__init__(device, **kwargs)
+ self.start_time = None
+ self.end_time = None
+
+ def on_run_start(self, context):
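+        # Connect with a high priority so that the timestamps are taken as close
+        # to the actual workload execution as possible.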
+ signal.connect(self.get_start_time, signal.BEFORE_WORKLOAD_EXECUTION, priority=self.priority)
+ signal.connect(self.get_stop_time, signal.AFTER_WORKLOAD_EXECUTION, priority=self.priority)
+
+ def get_start_time(self, context):
+ self.start_time = time.time()
+
+ def get_stop_time(self, context):
+ self.end_time = time.time()
+
+ def update_result(self, context):
+ execution_time = self.end_time - self.start_time
+ context.result.add_metric('execution_time', execution_time, 'seconds')
+
+
+class ApkVersion(Instrument):
+
+ name = 'apk_version'
+ description = """
+ Extracts APK versions for workloads that have them.
+
+ """
+
+ def __init__(self, device, **kwargs):
+ super(ApkVersion, self).__init__(device, **kwargs)
+ self.apk_info = None
+
+ def setup(self, context):
+ if hasattr(context.workload, 'apk_file'):
+ self.apk_info = ApkInfo(context.workload.apk_file)
+ else:
+ self.apk_info = None
+
+ def update_result(self, context):
+ if self.apk_info:
+ context.result.add_metric(self.name, self.apk_info.version_name)
+
+
+class InterruptStatsInstrument(Instrument):
+
+ name = 'interrupts'
+ description = """
+ Pulls the ``/proc/interrupts`` file before and after workload execution and diffs them
+ to show what interrupts occurred during that time.
+
+ """
+
+ def __init__(self, device, **kwargs):
+ super(InterruptStatsInstrument, self).__init__(device, **kwargs)
+ self.before_file = None
+ self.after_file = None
+ self.diff_file = None
+
+ def setup(self, context):
+ self.before_file = os.path.join(context.output_directory, 'before', 'proc', 'interrupts')
+ self.after_file = os.path.join(context.output_directory, 'after', 'proc', 'interrupts')
+ self.diff_file = os.path.join(context.output_directory, 'diff', 'proc', 'interrupts')
+
+ def start(self, context):
+ with open(_f(self.before_file), 'w') as wfh:
+ wfh.write(self.device.execute('cat /proc/interrupts'))
+
+ def stop(self, context):
+ with open(_f(self.after_file), 'w') as wfh:
+ wfh.write(self.device.execute('cat /proc/interrupts'))
+
+ def update_result(self, context):
+ # If workload execution failed, the after_file may not have been created.
+ if os.path.isfile(self.after_file):
+ _diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
+
+
+class DynamicFrequencyInstrument(SysfsExtractor):
+
+ name = 'cpufreq'
+ description = """
+ Collects dynamic frequency (DVFS) settings before and after workload execution.
+
+ """
+
+ tarname = 'cpufreq.tar.gz'
+
+ parameters = [
+ Parameter('paths', mandatory=False, override=True),
+ ]
+
+ def setup(self, context):
+ self.paths = ['/sys/devices/system/cpu']
+ if self.device.is_rooted:
+ self.paths.append('/sys/class/devfreq/*') # the '*' would cause problems for adb pull.
+ super(DynamicFrequencyInstrument, self).setup(context)
+
+ def validate(self):
+ # temp-fs would have been set in super's validate, if not explicitly specified.
+ if not self.tmpfs_mount_point.endswith('-cpufreq'): # pylint: disable=access-member-before-definition
+ self.tmpfs_mount_point += '-cpufreq'
+
+
+def _diff_interrupt_files(before, after, result): # pylint: disable=R0914
+ output_lines = []
+ with open(before) as bfh:
+ with open(after) as ofh:
+ for bline, aline in izip(bfh, ofh):
+ bchunks = bline.strip().split()
+ while True:
+ achunks = aline.strip().split()
+ if achunks[0] == bchunks[0]:
+ diffchunks = ['']
+ diffchunks.append(achunks[0])
+ diffchunks.extend([diff_tokens(b, a) for b, a
+ in zip(bchunks[1:], achunks[1:])])
+ output_lines.append(diffchunks)
+ break
+ else: # new category appeared in the after file
+ diffchunks = ['>'] + achunks
+ output_lines.append(diffchunks)
+ try:
+ aline = ofh.next()
+ except StopIteration:
+ break
+
+ # Offset heading columns by one to allow for row labels on subsequent
+ # lines.
+ output_lines[0].insert(0, '')
+
+ # Any "columns" that do not have headings in the first row are not actually
+    # columns -- they are a single column where space-separated words got
+ # split. Merge them back together to prevent them from being
+ # column-aligned by write_table.
+ table_rows = [output_lines[0]]
+ num_cols = len(output_lines[0])
+ for row in output_lines[1:]:
+ table_row = row[:num_cols]
+ table_row.append(' '.join(row[num_cols:]))
+ table_rows.append(table_row)
+
+ with open(result, 'w') as wfh:
+ write_table(table_rows, wfh)
+
+
+def _diff_sysfs_dirs(before, after, result): # pylint: disable=R0914
+ before_files = []
+ os.path.walk(before,
+ lambda arg, dirname, names: arg.extend([os.path.join(dirname, f) for f in names]),
+ before_files
+ )
+ before_files = filter(os.path.isfile, before_files)
+ files = [os.path.relpath(f, before) for f in before_files]
+ after_files = [os.path.join(after, f) for f in files]
+ diff_files = [os.path.join(result, f) for f in files]
+
+ for bfile, afile, dfile in zip(before_files, after_files, diff_files):
+ if not os.path.isfile(afile):
+ logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile))
+ continue
+
+ with open(bfile) as bfh, open(afile) as afh: # pylint: disable=C0321
+ with open(_f(dfile), 'w') as dfh:
+ for i, (bline, aline) in enumerate(izip_longest(bfh, afh), 1):
+ if aline is None:
+ logger.debug('Lines missing from {}'.format(afile))
+ break
+ bchunks = re.split(r'(\W+)', bline)
+ achunks = re.split(r'(\W+)', aline)
+ if len(bchunks) != len(achunks):
+ logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
+ dfh.write('xxx ' + bline)
+ continue
+ if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
+ (bchunks[0] == achunks[0])):
+ # if there are only two columns and the first column is the
+ # same, assume it's a "header" column and do not diff it.
+ dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
+ else:
+ dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
+ dfh.write(''.join(dchunks))
+
diff --git a/wlauto/instrumentation/perf/LICENSE b/wlauto/instrumentation/perf/LICENSE
new file mode 100644
index 00000000..99f70b0d
--- /dev/null
+++ b/wlauto/instrumentation/perf/LICENSE
@@ -0,0 +1,9 @@
+perf binaries included here are part of the Linux kernel and are distributed
+under GPL version 2; the full text of the license may be viewed here:
+
+http://www.gnu.org/licenses/gpl-2.0.html
+
+Source for these binaries is part of the Linux kernel source tree. It may be
+obtained from Linaro here:
+
+https://git.linaro.org/arm/big.LITTLE/mp.git
diff --git a/wlauto/instrumentation/perf/__init__.py b/wlauto/instrumentation/perf/__init__.py
new file mode 100644
index 00000000..523ae2e0
--- /dev/null
+++ b/wlauto/instrumentation/perf/__init__.py
@@ -0,0 +1,176 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,W0201
+import os
+import re
+import itertools
+
+
+from wlauto import Instrument, Executable, Parameter
+from wlauto.exceptions import ConfigError
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.types import list_or_string, list_of_strs
+
+PERF_COMMAND_TEMPLATE = '{} stat {} {} sleep 1000 > {} 2>&1 '
+
+DEVICE_RESULTS_FILE = '/data/local/perf_results.txt'
+HOST_RESULTS_FILE_BASENAME = 'perf.txt'
+
+PERF_COUNT_REGEX = re.compile(r'^\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
+
+
+class PerfInstrument(Instrument):
+
+ name = 'perf'
+ description = """
+    Perf is a Linux profiling tool based on performance counters.
+
+ Performance counters are CPU hardware registers that count hardware events
+ such as instructions executed, cache-misses suffered, or branches
+ mispredicted. They form a basis for profiling applications to trace dynamic
+ control flow and identify hotspots.
+
+    perf accepts options and events. If no option is given, the default '-a' is
+    used. The default events are migrations and cs. Both options and events can
+    be specified in the config file.
+
+    Events must be provided as a list, e.g. ::
+
+ perf_events = ['migrations', 'cs']
+
+ Events can be obtained by typing the following in the command line on the
+ device ::
+
+ perf list
+
+    Options, on the other hand, can be provided as a single string, as follows ::
+
+ perf_options = '-a -i'
+
+ Options can be obtained by running the following in the command line ::
+
+ man perf-record
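+
+    To run multiple instances of perf with different options, provide a list of
+    option strings; for example, the sketch below (an illustration -- it assumes
+    cores 0-3 form the LITTLE cluster and 4-7 the big cluster) collects counts
+    from each cluster separately ::
+
+        perf_options = ['-a -C 0,1,2,3', '-a -C 4,5,6,7']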
+ """
+
+ parameters = [
+ Parameter('events', kind=list_of_strs, default=['migrations', 'cs'],
+ constraint=(lambda x: x, 'must not be empty.'),
+ description="""Specifies the events to be counted."""),
+ Parameter('optionstring', kind=list_or_string, default='-a',
+ description="""Specifies options to be used for the perf command. This
+ may be a list of option strings, in which case, multiple instances of perf
+ will be kicked off -- one for each option string. This may be used to e.g.
+                  collect different events from different big.LITTLE clusters.
+ """),
+ Parameter('labels', kind=list_of_strs, default=None,
+                  description="""Provides labels for perf output. If specified, the number of
+ labels must match the number of ``optionstring``\ s.
+ """),
+ ]
+
+ def on_run_init(self, context):
+ if not self.device.is_installed('perf'):
+ binary = context.resolver.get(Executable(self, self.device.abi, 'perf'))
+ self.device.install(binary)
+ self.commands = self._build_commands()
+
+ def setup(self, context):
+ self._clean_device()
+
+ def start(self, context):
+ for command in self.commands:
+ self.device.kick_off(command)
+
+ def stop(self, context):
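+        # perf was kicked off as 'perf stat ... sleep 1000' (see
+        # PERF_COMMAND_TEMPLATE); killing the sleep causes perf to terminate
+        # and write out the collected counts.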
+ self.device.killall('sleep')
+
+ def update_result(self, context):
+ for label in self.labels:
+ device_file = self._get_device_outfile(label)
+ host_relpath = os.path.join('perf', os.path.basename(device_file))
+ host_file = _f(os.path.join(context.output_directory, host_relpath))
+ self.device.pull_file(device_file, host_file)
+ context.add_iteration_artifact(label, kind='raw', path=host_relpath)
+ with open(host_file) as fh:
+ in_results_section = False
+ for line in fh:
+ if 'Performance counter stats' in line:
+ in_results_section = True
+ fh.next() # skip the following blank line
+ if in_results_section:
+ if not line.strip(): # blank line
+ in_results_section = False
+ break
+ else:
+ line = line.split('#')[0] # comment
+ match = PERF_COUNT_REGEX.search(line)
+ if match:
+ count = int(match.group(1))
+ metric = '{}_{}'.format(label, match.group(2))
+ context.result.add_metric(metric, count)
+
+ def teardown(self, context): # pylint: disable=R0201
+ self._clean_device()
+
+ def validate(self):
+ if isinstance(self.optionstring, list):
+ self.optionstrings = self.optionstring
+ else:
+ self.optionstrings = [self.optionstring]
+ if isinstance(self.events[0], list): # we know events are non-empty due to param constraint pylint: disable=access-member-before-definition
+ self.events = self.events
+ else:
+ self.events = [self.events]
+ if not self.labels: # pylint: disable=E0203
+ self.labels = ['perf_{}'.format(i) for i in xrange(len(self.optionstrings))]
+ if not len(self.labels) == len(self.optionstrings):
+            raise ConfigError('The number of labels must match the number of optionstrings provided for perf.')
+
+ def _build_commands(self):
+ events = itertools.cycle(self.events)
+ commands = []
+ for opts, label in itertools.izip(self.optionstrings, self.labels):
+ commands.append(self._build_perf_command(opts, events.next(), label))
+ return commands
+
+ def _clean_device(self):
+ for label in self.labels:
+ filepath = self._get_device_outfile(label)
+ self.device.delete_file(filepath)
+
+ def _get_device_outfile(self, label):
+ return self.device.path.join(self.device.working_directory, '{}.out'.format(label))
+
+ def _build_perf_command(self, options, events, label):
+ event_string = ' '.join(['-e {}'.format(e) for e in events])
+ command = PERF_COMMAND_TEMPLATE.format('perf',
+ options or '',
+ event_string,
+ self._get_device_outfile(label))
+ return command
+
+
+class CCIPerfEvent(object):
+
+ def __init__(self, name, config):
+ self.name = name
+ self.config = config
+
+ def __str__(self):
+ return 'CCI/config={config},name={name}/'.format(**self.__dict__)
+
diff --git a/wlauto/instrumentation/perf/bin/arm64/perf b/wlauto/instrumentation/perf/bin/arm64/perf
new file mode 100755
index 00000000..5ec37c76
--- /dev/null
+++ b/wlauto/instrumentation/perf/bin/arm64/perf
Binary files differ
diff --git a/wlauto/instrumentation/perf/bin/armeabi/perf b/wlauto/instrumentation/perf/bin/armeabi/perf
new file mode 100755
index 00000000..5a52db56
--- /dev/null
+++ b/wlauto/instrumentation/perf/bin/armeabi/perf
Binary files differ
diff --git a/wlauto/instrumentation/pmu_logger/__init__.py b/wlauto/instrumentation/pmu_logger/__init__.py
new file mode 100644
index 00000000..1a9a0adb
--- /dev/null
+++ b/wlauto/instrumentation/pmu_logger/__init__.py
@@ -0,0 +1,148 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101,W0201
+import os
+import re
+import csv
+
+from wlauto import Instrument, settings, Parameter
+from wlauto.instrumentation import instrument_is_installed
+from wlauto.exceptions import ConfigError
+from wlauto.utils.types import boolean
+
+
+NUMBER_OF_CCI_PMU_COUNTERS = 4
+DEFAULT_EVENTS = ['0x63', '0x6A', '0x83', '0x8A']
+DEFAULT_PERIOD = 10 # in jiffies
+
+CPL_BASE = '/sys/kernel/debug/cci_pmu_logger/'
+CPL_CONTROL_FILE = CPL_BASE + 'control'
+CPL_PERIOD_FILE = CPL_BASE + 'period_jiffies'
+
+DRIVER = 'pmu_logger.ko'
+
+REGEX = re.compile(r'(\d+(?:\.\d+)?):\s+bprint:.*Cycles:\s*(\S+)\s*Counter_0:\s*(\S+)\s*Counter_1:\s*(\S+)\s*Counter_2:\s*(\S+)\s*Counter_3:\s*(\S+)')
+
+
+class CciPmuLogger(Instrument):
+
+ name = "cci_pmu_logger"
+ description = """
+ This instrument allows collecting CCI counter data.
+
+ It relies on the pmu_logger.ko kernel driver, the source for which is
+ included with Workload Automation (see inside ``wlauto/external`` directory).
+ You will need to build this against your specific kernel. Once compiled, it needs
+    to be placed in the dependencies directory (usually ``~/.workload_automation/dependencies``).
+
+    .. note:: When compiling pmu_logger.ko for a new hardware platform, you may need to
+ modify CCI_BASE inside pmu_logger.c to contain the base address of where
+ CCI is mapped in memory on your device.
+
+    This instrument relies on the ``trace-cmd`` instrument also being enabled. You should
+    enable at least the ``'bprint'`` trace event.
+
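+    For example, a minimal configuration enabling both instruments might look like
+    this (a sketch; ``trace_events`` is the trace-cmd instrument's global alias)::
+
+        instrumentation = ['trace-cmd', 'cci_pmu_logger']
+        trace_events = ['bprint']
+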
+ """
+
+ parameters = [
+ Parameter('events', kind=list, default=DEFAULT_EVENTS,
+ description="""
+ A list of strings, each representing an event to be counted. The length
+ of the list cannot exceed the number of PMU counters available (4 in CCI-400).
+ If this is not specified, shareable read transactions and snoop hits on both
+ clusters will be counted by default. E.g. ``['0x63', '0x83']``.
+ """),
+ Parameter('event_labels', kind=list, default=[],
+ description="""
+ A list of labels to be used when reporting PMU counts. If specified,
+ this must be of the same length as ``cci_pmu_events``. If not specified,
+ events will be labeled "event_<event_number>".
+ """),
+ Parameter('period', kind=int, default=10,
+ description='The period (in jiffies) between counter reads.'),
+ Parameter('install_module', kind=boolean, default=True,
+ description="""
+ Specifies whether pmu_logger has been compiled as a .ko module that needs
+ to be installed by the instrument. (.ko binary must be in {}). If this is set
+ to ``False``, it will be assumed that pmu_logger has been compiled into the kernel,
+ or that it has been installed prior to the invocation of WA.
+ """.format(settings.dependencies_directory)),
+ ]
+
+ def on_run_init(self, context):
+ if self.install_module:
+ self.device_driver_file = self.device.path.join(self.device.working_directory, DRIVER)
+ host_driver_file = os.path.join(settings.dependencies_directory, DRIVER)
+ self.device.push_file(host_driver_file, self.device_driver_file)
+
+ def setup(self, context):
+ if self.install_module:
+ self.device.execute('insmod {}'.format(self.device_driver_file), check_exit_code=False)
+ self.device.set_sysfile_value(CPL_PERIOD_FILE, self.period)
+ for i, event in enumerate(self.events):
+ counter = CPL_BASE + 'counter{}'.format(i)
+ self.device.set_sysfile_value(counter, event, verify=False)
+
+ def start(self, context):
+ self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+
+ def stop(self, context):
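+        # The control file appears to act as a toggle: writing 1 again while
+        # logging is active stops it (note that start() writes the same value).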
+ self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+
+    # Result processing is done inside teardown, because we need to make sure that
+    # trace-cmd has processed its results and generated trace.txt first.
+ def teardown(self, context):
+ trace_file = os.path.join(context.output_directory, 'trace.txt')
+ rows = [['timestamp', 'cycles'] + self.event_labels]
+ with open(trace_file) as fh:
+ for line in fh:
+ match = REGEX.search(line)
+ if match:
+ rows.append([
+ float(match.group(1)),
+ int(match.group(2), 16),
+ int(match.group(3), 16),
+ int(match.group(4), 16),
+ int(match.group(5), 16),
+ int(match.group(6), 16),
+ ])
+ output_file = os.path.join(context.output_directory, 'cci_counters.txt')
+ with open(output_file, 'wb') as wfh:
+ writer = csv.writer(wfh)
+ writer.writerows(rows)
+ context.add_iteration_artifact('cci_counters', path='cci_counters.txt', kind='data',
+ description='CCI PMU counter data.')
+
+ # summary metrics
+ sums = map(sum, zip(*(r[1:] for r in rows[1:])))
+ labels = ['cycles'] + self.event_labels
+ for label, value in zip(labels, sums):
+ context.result.add_metric('cci ' + label, value, lower_is_better=True)
+
+ # actual teardown
+ if self.install_module:
+ self.device.execute('rmmod pmu_logger', check_exit_code=False)
+
+ def validate(self):
+ if not instrument_is_installed('trace-cmd'):
+ raise ConfigError('To use cci_pmu_logger, trace-cmd instrument must also be enabled.')
+ if not self.event_labels: # pylint: disable=E0203
+ self.event_labels = ['event_{}'.format(e) for e in self.events]
+ elif not len(self.events) == len(self.event_labels):
+ raise ConfigError('cci_pmu_events and cci_pmu_event_labels must be of the same length.')
+ if len(self.events) > NUMBER_OF_CCI_PMU_COUNTERS:
+            raise ConfigError('The number of cci_pmu_events must be at most {}.'.format(NUMBER_OF_CCI_PMU_COUNTERS))
diff --git a/wlauto/instrumentation/streamline/__init__.py b/wlauto/instrumentation/streamline/__init__.py
new file mode 100644
index 00000000..841c44b0
--- /dev/null
+++ b/wlauto/instrumentation/streamline/__init__.py
@@ -0,0 +1,298 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+import os
+import signal
+import shutil
+import subprocess
+import logging
+import re
+
+from wlauto import settings, Instrument, Parameter, ResourceGetter, GetterPriority, File
+from wlauto.exceptions import InstrumentError, DeviceError, ResourceError
+from wlauto.utils.misc import ensure_file_directory_exists as _f
+from wlauto.utils.types import boolean
+from wlauto.utils.log import StreamLogger, LogWriter, LineLogWriter
+
+
+SESSION_TEXT_TEMPLATE = ('<?xml version="1.0" encoding="US-ASCII" ?>'
+ '<session'
+ ' version="1"'
+ ' output_path="x"'
+ ' call_stack_unwinding="no"'
+ ' parse_debug_info="no"'
+ ' high_resolution="no"'
+ ' buffer_mode="streaming"'
+ ' sample_rate="none"'
+ ' duration="0"'
+ ' target_host="127.0.0.1"'
+ ' target_port="{}"'
+ ' energy_cmd_line="{}">'
+ '</session>')
+
+VERSION_REGEX = re.compile(r'\(DS-5 v(.*?)\)')
+
+
+class StreamlineResourceGetter(ResourceGetter):
+
+ name = 'streamline_resource'
+ resource_type = 'file'
+    priority = GetterPriority.environment + 1  # run before standard environment resolvers.
+
+ dependencies_directory = os.path.join(settings.dependencies_directory, 'streamline')
+ old_dependencies_directory = os.path.join(settings.environment_root, 'streamline') # backwards compatibility
+
+ def get(self, resource, **kwargs):
+ if resource.owner.name != 'streamline':
+ return None
+ test_path = _f(os.path.join(self.dependencies_directory, resource.path))
+ if os.path.isfile(test_path):
+ return test_path
+ test_path = _f(os.path.join(self.old_dependencies_directory, resource.path))
+ if os.path.isfile(test_path):
+ return test_path
+
+
+class StreamlineInstrument(Instrument):
+
+ name = 'streamline'
+ description = """
+ Collect Streamline traces from the device.
+
+    .. note:: This instrument supports the Streamline that comes with DS-5 v5.17 and
+              later; earlier versions may not work correctly (or at all).
+
+ This Instrument allows collecting streamline traces (such as PMU counter values) from
+ the device. It assumes you have DS-5 (which Streamline is part of) installed on your
+    system, and that the streamline command is somewhere in PATH.
+
+    Streamline works by connecting to the gator service on the device. gator comes in two
+    parts: a driver (gator.ko) and a daemon (gatord). The driver needs to be compiled against your
+ kernel and both driver and daemon need to be compatible with your version of Streamline.
+    The best way to ensure compatibility is to build them from the source that came with
+    your DS-5. gator source can be found in ::
+
+ /usr/local/DS-5/arm/gator
+
+    (the exact path may vary depending on where you have installed DS-5.) Please refer to the
+    README that accompanies the source for instructions on how to build it.
+
+ Once you have built the driver and the daemon, place the binaries into your
+    ~/.workload_automation/streamline/ directory (if you haven't tried running WA with
+    this instrument before, the streamline/ subdirectory might not exist, in which
+    case you will need to create it).
+
+ In order to specify which events should be captured, you need to provide a
+ configuration.xml for the gator. The easiest way to obtain this file is to export it
+    from the event configuration dialog in the DS-5 Streamline GUI. The file should be called
+    "configuration.xml" and it should be placed in the same directory as the gator binaries.
+
+    With that done, you can enable streamline traces by adding the following entry to the
+    instrumentation list in your ~/.workload_automation/config.py
+
+ ::
+
+ instrumentation = [
+ # ...
+ 'streamline',
+ # ...
+ ]
+
+ You can also specify the following (optional) configuration in the same config file:
+
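+    For example, to have WA generate a CSV report from the captured data (a sketch
+    using this instrument's ``streamline_report_csv`` global alias)::
+
+        streamline_report_csv = True
+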
+ """
+ supported_platforms = ['android']
+
+ parameters = [
+ Parameter('port', default='8080',
+ description='Specifies the port on which streamline will connect to gator'),
+ Parameter('configxml', default=None,
+ description='streamline configuration XML file to be used. This must be '
+                              'an absolute path, though it may contain the user home symbol (~).'),
+ Parameter('report', kind=boolean, default=False, global_alias='streamline_report_csv',
+ description='Specifies whether a report should be generated from streamline data.'),
+ Parameter('report_options', kind=str, default='-format csv',
+                  description='A string with options that will be added to the streamline -report command.'),
+ ]
+
+ daemon = 'gatord'
+ driver = 'gator.ko'
+ configuration_file_name = 'configuration.xml'
+
+ def __init__(self, device, **kwargs):
+ super(StreamlineInstrument, self).__init__(device, **kwargs)
+ self.streamline = None
+ self.session_file = None
+ self.capture_file = None
+ self.analysis_file = None
+ self.report_file = None
+ self.configuration_file = None
+ self.on_device_config = None
+ self.daemon_process = None
+ self.enabled = False
+ self.resource_getter = None
+
+ self.host_daemon_file = None
+ self.host_driver_file = None
+ self.device_driver_file = None
+
+ self._check_has_valid_display()
+
+ def on_run_start(self, context):
+ if subprocess.call('which caiman', stdout=subprocess.PIPE, shell=True):
+ raise InstrumentError('caiman not in PATH. Cannot enable Streamline tracing.')
+ p = subprocess.Popen('caiman --version 2>&1', stdout=subprocess.PIPE, shell=True)
+ out, _ = p.communicate()
+ match = VERSION_REGEX.search(out)
+ if not match:
+            raise InstrumentError('Could not determine caiman version. Cannot enable Streamline tracing.')
+ version_tuple = tuple(map(int, match.group(1).split('.')))
+ if version_tuple < (5, 17):
+ raise InstrumentError('Need DS-5 v5.17 or greater; found v{}'.format(match.group(1)))
+ self.enabled = True
+ self.resource_getter = StreamlineResourceGetter(context.resolver)
+ self.resource_getter.register()
+
+ def on_run_end(self, context):
+ self.enabled = False
+ self.resource_getter.unregister()
+
+ def on_run_init(self, context):
+ try:
+ self.host_daemon_file = context.resolver.get(File(self, self.daemon))
+ self.logger.debug('Using daemon from {}.'.format(self.host_daemon_file))
+ self.device.killall(self.daemon) # in case a version is already running
+ self.device.install(self.host_daemon_file)
+ except ResourceError:
+ self.logger.debug('Using on-device daemon.')
+
+ try:
+ self.host_driver_file = context.resolver.get(File(self, self.driver))
+ self.logger.debug('Using driver from {}.'.format(self.host_driver_file))
+ self.device_driver_file = self.device.install(self.host_driver_file)
+ except ResourceError:
+ self.logger.debug('Using on-device driver.')
+
+ try:
+ self.configuration_file = (os.path.expanduser(self.configxml or '') or
+ context.resolver.get(File(self, self.configuration_file_name)))
+ self.logger.debug('Using {}'.format(self.configuration_file))
+ self.on_device_config = self.device.path.join(self.device.working_directory, 'configuration.xml')
+ shutil.copy(self.configuration_file, settings.meta_directory)
+ except ResourceError:
+            self.logger.debug('No configuration file was specified.')
+
+ caiman_path = subprocess.check_output('which caiman', shell=True).strip() # pylint: disable=E1103
+ self.session_file = os.path.join(context.host_working_directory, 'streamline_session.xml')
+ with open(self.session_file, 'w') as wfh:
+ wfh.write(SESSION_TEXT_TEMPLATE.format(self.port, caiman_path))
+
+ def setup(self, context):
+        # Note: the config file needs to be copied on each iteration's setup
+ # because gator appears to "consume" it on invocation...
+ if self.configuration_file:
+ self.device.push_file(self.configuration_file, self.on_device_config)
+ self._initialize_daemon()
+ self.capture_file = _f(os.path.join(context.output_directory, 'streamline', 'capture.apc'))
+ self.report_file = _f(os.path.join(context.output_directory, 'streamline', 'streamline.csv'))
+
+ def start(self, context):
+ if self.enabled:
+ command = ['streamline', '-capture', self.session_file, '-output', self.capture_file]
+ self.streamline = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ preexec_fn=os.setpgrp)
+ outlogger = StreamLogger('streamline', self.streamline.stdout, klass=LineLogWriter)
+ errlogger = StreamLogger('streamline', self.streamline.stderr, klass=LineLogWriter)
+ outlogger.start()
+ errlogger.start()
+
+ def stop(self, context):
+ if self.enabled:
+ os.killpg(self.streamline.pid, signal.SIGTERM)
+
+ def update_result(self, context):
+ if self.enabled:
+ self._kill_daemon()
+ if self.report:
+ self.logger.debug('Creating report...')
+ command = ['streamline', '-report', self.capture_file, '-output', self.report_file]
+ command += self.report_options.split()
+ _run_streamline_command(command)
+ context.add_artifact('streamlinecsv', self.report_file, 'data')
+
+ def teardown(self, context):
+ self.device.delete_file(self.on_device_config)
+
+ def _check_has_valid_display(self): # pylint: disable=R0201
+ reason = None
+ if os.name == 'posix' and not os.getenv('DISPLAY'):
+ reason = 'DISPLAY is not set.'
+ else:
+ p = subprocess.Popen('xhost', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ _, error = p.communicate()
+ if p.returncode:
+ reason = 'Invalid DISPLAY; xhost returned: "{}".'.format(error.strip()) # pylint: disable=E1103
+ if reason:
+ raise InstrumentError('{}\nstreamline binary requires a valid display server to be running.'.format(reason))
+
+ def _initialize_daemon(self):
+ if self.device_driver_file:
+ try:
+ self.device.execute('insmod {}'.format(self.device_driver_file))
+ except DeviceError, e:
+ if 'File exists' not in e.message:
+ raise
+ self.logger.debug('Driver was already installed.')
+ self._start_daemon()
+ port_spec = 'tcp:{}'.format(self.port)
+ self.device.forward_port(port_spec, port_spec)
+
+ def _start_daemon(self):
+ self.logger.debug('Starting gatord')
+ self.device.killall('gatord', as_root=True)
+ if self.configuration_file:
+ command = '{} -c {}'.format(self.daemon, self.on_device_config)
+        else:
+            command = '{}'.format(self.daemon)
+
+ self.daemon_process = self.device.execute(command, as_root=True, background=True)
+ outlogger = StreamLogger('gatord', self.daemon_process.stdout)
+ errlogger = StreamLogger('gatord', self.daemon_process.stderr, logging.ERROR)
+ outlogger.start()
+ errlogger.start()
+ if self.daemon_process.poll() is not None:
+ # If adb returned, something went wrong.
+ raise InstrumentError('Could not start gatord.')
+
+ def _kill_daemon(self):
+ self.logger.debug('Killing daemon process.')
+ self.daemon_process.kill()
+
+
+def _run_streamline_command(command):
+ streamline = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ output, error = streamline.communicate()
+ LogWriter('streamline').write(output).close()
+ LogWriter('streamline').write(error).close()
+
diff --git a/wlauto/instrumentation/trace_cmd/LICENSE b/wlauto/instrumentation/trace_cmd/LICENSE
new file mode 100644
index 00000000..9d46c1a5
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/LICENSE
@@ -0,0 +1,39 @@
+Included trace-cmd binaries are Free Software distributed under GPLv2:
+
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+The full text of the license may be viewed here:
+
+http://www.gnu.org/licenses/gpl-2.0.html
+
+Source code for trace-cmd may be obtained here:
+
+git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/trace-cmd.git
+
+Binaries included here contain modifications by ARM that, at the time of writing,
+have not yet made it into the above repository. The patches for these modifications
+are available here:
+
+http://article.gmane.org/gmane.linux.kernel/1869111
+http://article.gmane.org/gmane.linux.kernel/1869112
+
+
+
diff --git a/wlauto/instrumentation/trace_cmd/__init__.py b/wlauto/instrumentation/trace_cmd/__init__.py
new file mode 100644
index 00000000..fdd4f76b
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/__init__.py
@@ -0,0 +1,322 @@
+# Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=W0613,E1101
+from __future__ import division
+import os
+import time
+import subprocess
+from collections import defaultdict
+
+from wlauto import Instrument, Parameter, Executable
+from wlauto.exceptions import InstrumentError, ConfigError
+from wlauto.core import signal
+from wlauto.utils.types import boolean
+
+OUTPUT_TRACE_FILE = 'trace.dat'
+OUTPUT_TEXT_FILE = '{}.txt'.format(os.path.splitext(OUTPUT_TRACE_FILE)[0])
+TIMEOUT = 180
+
+
+class TraceCmdInstrument(Instrument):
+
+ name = 'trace-cmd'
+ description = """
+    trace-cmd is an instrument that interacts with ftrace, the Linux kernel's
+    internal tracer.
+
+ From trace-cmd man page:
+
+ trace-cmd command interacts with the Ftrace tracer that is built inside the
+ Linux kernel. It interfaces with the Ftrace specific files found in the
+ debugfs file system under the tracing directory.
+
+ trace-cmd reads a list of events it will trace, which can be specified in
+ the config file as follows ::
+
+ trace_events = ['irq*', 'power*']
+
+ If no event is specified in the config file, trace-cmd traces the following events:
+
+ - sched*
+ - irq*
+ - power*
+ - cpufreq_interactive*
+
+    The list of available events can be obtained by running the following command
+    as root on the device ::
+
+ trace-cmd list
+
+    You may also specify the ``trace_buffer_size`` setting, which must be an integer
+    that will be used to set the ftrace buffer size. It will be interpreted as KB::
+
+        trace_buffer_size = 8000
+
+    The maximum buffer size varies from device to device; attempting to set a buffer size
+    beyond it will fail. If you plan on collecting a lot of trace over long periods of
+    time, the buffer will not be large enough and you will only get trace for the last
+    portion of your run. To deal with this, you can set the ``trace_mode`` setting to
+    ``'record'`` (the default is ``'start'``)::
+
+        trace_mode = 'record'
+
+    This will cause trace-cmd to trace into file(s) on disk, rather than the buffer, so the
+    limit on the maximum trace size is set by the storage available on the device. Bear in
+    mind that ``'record'`` mode *is* more intrusive than the default, so if you do not plan
+    on generating a lot of trace, it is best to use the default ``'start'`` mode.
+
+    .. note:: Mode names correspond to the underlying trace-cmd executable's commands used to
+ implement them. You can find out more about what is happening in each case from
+ trace-cmd documentation: https://lwn.net/Articles/341902/.
+
+    This instrument comes with an Android trace-cmd binary that will be copied to and used on
+    the device; however, post-processing is done on the host, so you must have trace-cmd
+    installed and in your PATH. On Ubuntu systems, this may be done with::
+
+ sudo apt-get install trace-cmd
+
+ """
+
+ parameters = [
+ Parameter('events', kind=list, default=['sched*', 'irq*', 'power*', 'cpufreq_interactive*'],
+ global_alias='trace_events',
+ description="""
+ Specifies the list of events to be traced. Each event in the list will be passed to
+ trace-cmd with -e parameter and must be in the format accepted by trace-cmd.
+ """),
+ Parameter('mode', default='start', allowed_values=['start', 'record'],
+ global_alias='trace_mode',
+ description="""
+ Trace can be collected using either 'start' or 'record' trace-cmd
+ commands. In 'start' mode, trace will be collected into the ftrace buffer;
+ in 'record' mode, trace will be written into a file on the device's file
+ system. 'start' mode is (in theory) less intrusive than 'record' mode, however
+ it is limited by the size of the ftrace buffer (which is configurable --
+ see ``buffer_size`` -- but only up to a point) and that may overflow
+ for long-running workloads, which will result in dropped events.
+ """),
+ Parameter('buffer_size', kind=int, default=None,
+ global_alias='trace_buffer_size',
+ description="""
+ Attempt to set ftrace buffer size to the specified value (in KB). Default buffer size
+ may need to be increased for long-running workloads, or if a large number
+                      of events have been enabled. Note: there is a maximum size to which the
+                      buffer can be set, and that varies from device to device. Attempting to set the buffer size higher
+ than this will fail. In that case, this instrument will set the size to the highest
+ possible value by going down from the specified size in ``buffer_size_step`` intervals.
+ """),
+ Parameter('buffer_size_step', kind=int, default=1000,
+ global_alias='trace_buffer_size_step',
+ description="""
+ Defines the decremental step used if the specified ``buffer_size`` could not be set.
+                      This will be subtracted from the buffer size until the set succeeds or the
+                      size is reduced to 1MB.
+ """),
+ Parameter('buffer_size_file', default='/d/tracing/buffer_size_kb',
+ description="""
+                      Path to the debugfs file that may be used to set the ftrace buffer size. This
+                      should not need to be modified for the vast majority of devices.
+ """),
+ Parameter('report', kind=boolean, default=True,
+ description="""
+ Specifies whether host-side reporting should be performed once the binary trace has been
+                      pulled from the device.
+
+ .. note:: This requires the latest version of trace-cmd to be installed on the host (the
+ one in your distribution's repos may be too old).
+
+ """),
+ Parameter('no_install', kind=boolean, default=False,
+ description="""
+ Do not install the bundled trace-cmd and use the one on the device instead. If there is
+ not already a trace-cmd on the device, an error is raised.
+
+ """),
+ ]
+
+ def __init__(self, device, **kwargs):
+ super(TraceCmdInstrument, self).__init__(device, **kwargs)
+ self.trace_cmd = None
+ self.event_string = _build_trace_events(self.events)
+        self.output_file = self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
+ self.temp_trace_file = self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
+
+ def on_run_init(self, context):
+ if not self.device.is_rooted:
+ raise InstrumentError('trace-cmd instrument cannot be used on an unrooted device.')
+ if not self.no_install:
+ host_file = context.resolver.get(Executable(self, self.device.abi, 'trace-cmd'))
+ self.trace_cmd = self.device.install_executable(host_file)
+ else:
+ if not self.device.is_installed('trace-cmd'):
+ raise ConfigError('No trace-cmd found on device and no_install=True is specified.')
+ self.trace_cmd = 'trace-cmd'
+ # Register ourselves as absolute last event before and
+ # first after so we can mark the trace at the right time
+ signal.connect(self.insert_start_mark, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
+ signal.connect(self.insert_end_mark, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
+
+ def setup(self, context):
+ if self.mode == 'start':
+ if self.buffer_size:
+ self._set_buffer_size()
+ self.device.execute('{} reset'.format(self.trace_cmd), as_root=True, timeout=180)
+ elif self.mode == 'record':
+ pass
+ else:
+ raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
+
+ def start(self, context):
+ self.start_time = time.time() # pylint: disable=attribute-defined-outside-init
+ if self.mode == 'start':
+ self.device.execute('{} start {}'.format(self.trace_cmd, self.event_string), as_root=True)
+ elif self.mode == 'record':
+ self.device.kick_off('{} record -o {} {}'.format(self.trace_cmd, self.output_file, self.event_string))
+ else:
+ raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
+
+ def stop(self, context):
+ self.stop_time = time.time() # pylint: disable=attribute-defined-outside-init
+ if self.mode == 'start':
+ self.device.execute('{} stop'.format(self.trace_cmd), timeout=60, as_root=True)
+ elif self.mode == 'record':
+ # There will be a trace-cmd worker process per CPU core plus a main
+ # control trace-cmd process. Interrupting the control process will
+ # trigger the generation of the single binary trace file.
+ trace_cmds = self.device.ps(name=self.trace_cmd)
+ if not trace_cmds:
+ raise InstrumentError('Could not find running trace-cmd on device.')
+ # The workers will have their PPID set to the PID of control.
+ parent_map = defaultdict(list)
+ for entry in trace_cmds:
+ parent_map[entry.ppid].append(entry.pid)
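+            # A control process is the sole child of its parent (the shell that
+            # kicked it off) and is itself the parent of the worker processes.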
+ controls = [v[0] for _, v in parent_map.iteritems()
+ if len(v) == 1 and v[0] in parent_map]
+ if len(controls) > 1:
+ self.logger.warning('More than one trace-cmd instance found; stopping all of them.')
+ for c in controls:
+ self.device.kill(c, signal='INT', as_root=True)
+ else:
+ raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
+
+ def update_result(self, context): # NOQA pylint: disable=R0912
+ if self.mode == 'start':
+ self.device.execute('{} extract -o {}'.format(self.trace_cmd, self.output_file),
+ timeout=TIMEOUT, as_root=True)
+ elif self.mode == 'record':
+ self.logger.debug('Waiting for trace.dat to be generated.')
+ while self.device.ps(name=self.trace_cmd):
+ time.sleep(2)
+ else:
+ raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
+
+ # The size of trace.dat will depend on how long trace-cmd was running.
+        # Therefore the timeout for the pull command must also be adjusted
+ # accordingly.
+ pull_timeout = (self.stop_time - self.start_time)
+ self.device.pull_file(self.output_file, context.output_directory, timeout=pull_timeout)
+ context.add_iteration_artifact('bintrace', OUTPUT_TRACE_FILE, kind='data',
+ description='trace-cmd generated ftrace dump.')
+
+ local_trace_file = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
+ local_txt_trace_file = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)
+
+ if self.report:
+ # To get the output of trace.dat, trace-cmd must be installed
+ # This is done host-side because the generated file is very large
+ if not os.path.isfile(local_trace_file):
+                self.logger.warning('Not generating trace.txt, as trace.dat does not exist.')
+ try:
+ command = 'trace-cmd report {} > {}'.format(local_trace_file, local_txt_trace_file)
+ self.logger.debug(command)
+ process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
+ _, error = process.communicate()
+ if process.returncode:
+ raise InstrumentError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
+ if error:
+ # logged at debug level, as trace-cmd always outputs some
+ # errors that seem benign.
+ self.logger.debug(error)
+ if os.path.isfile(local_txt_trace_file):
+ context.add_iteration_artifact('txttrace', OUTPUT_TEXT_FILE, kind='export',
+ description='trace-cmd generated ftrace dump.')
+ self.logger.debug('Verifying traces.')
+ with open(local_txt_trace_file) as fh:
+ for line in fh:
+ if 'EVENTS DROPPED' in line:
+ self.logger.warning('Dropped events detected.')
+ break
+ else:
+ self.logger.debug('Trace verified.')
+ else:
+ self.logger.warning('Could not generate trace.txt.')
+ except OSError:
+ raise InstrumentError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
+
+ def teardown(self, context):
+        self.device.delete_file(self.output_file)
+
+ def on_run_end(self, context):
+ pass
+
+ def validate(self):
+ if self.report and os.system('which trace-cmd > /dev/null'):
+ raise InstrumentError('trace-cmd is not in PATH; is it installed?')
+ if self.buffer_size:
+ if self.mode == 'record':
+ self.logger.debug('trace_buffer_size specified with record mode; it will be ignored.')
+ else:
+ try:
+ int(self.buffer_size)
+ except ValueError:
+ raise ConfigError('trace_buffer_size must be an int.')
+
+ def insert_start_mark(self, context):
+ # trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_START in info field
+ self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)
+
+ def insert_end_mark(self, context):
+ # trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_STOP in info field
+ self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)
+
+ def _set_buffer_size(self):
+ target_buffer_size = self.buffer_size
+ attempt_buffer_size = target_buffer_size
+ buffer_size = 0
+ floor = 1000 if target_buffer_size > 1000 else target_buffer_size
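+        # First, step down from the requested size until a write to the sysfs
+        # file sticks (i.e. reading the value back returns what was written).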
+ while attempt_buffer_size >= floor:
+ self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+ buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
+ if buffer_size == attempt_buffer_size:
+ break
+ else:
+ attempt_buffer_size -= self.buffer_size_step
+ if buffer_size == target_buffer_size:
+ return
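+        # Then step back up towards the target in buffer_size_step increments,
+        # stopping at the first size the kernel refuses to accept.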
+ while attempt_buffer_size < target_buffer_size:
+ attempt_buffer_size += self.buffer_size_step
+ self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+ buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
+ if attempt_buffer_size != buffer_size:
+ self.logger.warning('Failed to set trace buffer size to {}, value set was {}'.format(target_buffer_size, buffer_size))
+ break
+
+
+def _build_trace_events(events):
+ event_string = ' '.join(['-e {}'.format(e) for e in events])
+ return event_string
+
diff --git a/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd b/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd
new file mode 100755
index 00000000..0d025d0d
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/bin/arm64/trace-cmd
Binary files differ
diff --git a/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd b/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd
new file mode 100755
index 00000000..a4456627
--- /dev/null
+++ b/wlauto/instrumentation/trace_cmd/bin/armeabi/trace-cmd
Binary files differ